diff --git a/.flake8 b/.flake8 index 167f324..624546a 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,4 @@ [flake8] max-line-length = 160 -exclude = .git,.tox,dist,deb_dist,__pycache__ \ No newline at end of file +exclude = .git,.tox,dist,deb_dist,__pycache__,._* +ignore = E121,E123,E126,E226,E24,E704,W503,W504 \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 949bef3..50f23e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,16 @@ # Mediagrains Library Changelog +## 2.6.0 +- Added support for async methods to gsf decoder in python 3.6+ +- Added `Grain.origin_timerange` method. +- Added `Grain.normalise_time` method. +- Added `Colourbars` test signal generator +- Added `MovingBarOverlay` for test signal generators +- Added `mediagrains.numpy` sublibrary for handling video grains as numpy arrays, in python 3.6+ +- Added `PSNR` option to grain compare. +- Support for converting between all uncompressed video grain formats added to `mediagrains.numpy` +- This is the last release that will support python 2.7 (apart from bugfixes) + ## 2.5.3 - BUGFIX: IOBytes doesn't quite fulfil bytes-like contracts, but can be converted to something that does diff --git a/Jenkinsfile b/Jenkinsfile index cd1b440..e6b11be 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -16,17 +16,18 @@ pipeline { agent { - label "ubuntu&&apmm-slave" + label "ubuntu&&apmm-slave&&18.04" } options { ansiColor('xterm') // Add support for coloured output buildDiscarder(logRotator(numToKeepStr: '10')) // Discard old builds } triggers { - cron(env.BRANCH_NAME == 'master' ? 'H H(0-8) * * *' : '') // Build master some time every morning + cron((env.BRANCH_NAME == 'master' || env.BRANCH_NAME == 'dev')? 
'H H(0-8) * * *' : '') // Build master some time every morning } parameters { - booleanParam(name: "FORCE_PYUPLOAD", defaultValue: false, description: "Force Python artifact upload") + booleanParam(name: "FORCE_PYPIUPLOAD", defaultValue: false, description: "Force Python artifact upload to PyPi") + booleanParam(name: "FORCE_PYUPLOAD", defaultValue: false, description: "Force Python artifact upload to internal BBC repo") booleanParam(name: "FORCE_DEBUPLOAD", defaultValue: false, description: "Force Debian package upload") booleanParam(name: "FORCE_DOCSUPLOAD", defaultValue: false, description: "Force docs upload") } @@ -43,24 +44,42 @@ pipeline { } stage ("Tests") { parallel { - stage ("Linting Check") { + stage ("Py2.7 Linting Check") { steps { script { - env.lint_result = "FAILURE" + env.lint27_result = "FAILURE" } - bbcGithubNotify(context: "lint/flake8", status: "PENDING") + bbcGithubNotify(context: "lint/flake8_27", status: "PENDING") // Run the linter - sh 'flake8' + sh 'python2.7 -m flake8 --filename=mediagrains/*.py,tests/test_*.py' script { - env.lint_result = "SUCCESS" // This will only run if the sh above succeeded + env.lint27_result = "SUCCESS" // This will only run if the sh above succeeded } } post { always { - bbcGithubNotify(context: "lint/flake8", status: env.lint_result) + bbcGithubNotify(context: "lint/flake8_27", status: env.lint27_result) } } } + stage ("Py36 Linting Check") { + steps { + script { + env.lint3_result = "FAILURE" + } + bbcGithubNotify(context: "lint/flake8_3", status: "PENDING") + // Run the linter + sh 'python3 -m flake8 --filename=mediagrains/*.py,mediagrains_async/*.py,tests/test_*.py,tests/atest_*.py' + script { + env.lint3_result = "SUCCESS" // This will only run if the sh above succeeded + } + } + post { + always { + bbcGithubNotify(context: "lint/flake8_3", status: env.lint3_result) + } + } + } stage ("Build Docs") { steps { sh 'TOXDIR=/tmp/$(basename ${WORKSPACE})/tox-docs make docs' @@ -89,18 +108,18 @@ pipeline { stage 
("Python 3 Unit Tests") { steps { script { - env.py3_result = "FAILURE" + env.py36_result = "FAILURE" } - bbcGithubNotify(context: "tests/py3", status: "PENDING") + bbcGithubNotify(context: "tests/py36", status: "PENDING") // Use a workdirectory in /tmp to avoid shebang length limitation - sh 'tox -e py3 --recreate --workdir /tmp/$(basename ${WORKSPACE})/tox-py3' + sh 'tox -e py36 --recreate --workdir /tmp/$(basename ${WORKSPACE})/tox-py36' script { - env.py3_result = "SUCCESS" // This will only run if the sh above succeeded + env.py36_result = "SUCCESS" // This will only run if the sh above succeeded } } post { always { - bbcGithubNotify(context: "tests/py3", status: env.py3_result) + bbcGithubNotify(context: "tests/py36", status: env.py36_result) } } } @@ -162,10 +181,11 @@ pipeline { when { anyOf { expression { return params.FORCE_PYUPLOAD } + expression { return params.FORCE_PYPIUPLOAD } expression { return params.FORCE_DEBUPLOAD } expression { return params.FORCE_DOCSUPLOAD } expression { - bbcShouldUploadArtifacts(branches: ["master"]) + bbcShouldUploadArtifacts(branches: ["master", "dev"]) } } } @@ -175,7 +195,7 @@ pipeline { anyOf { expression { return params.FORCE_DOCSUPLOAD } expression { - bbcShouldUploadArtifacts(branches: ["master"]) + bbcShouldUploadArtifacts(branches: ["master", "dev"]) } } } @@ -186,7 +206,7 @@ pipeline { stage ("Upload to PyPi") { when { anyOf { - expression { return params.FORCE_PYUPLOAD } + expression { return params.FORCE_PYPIUPLOAD } expression { bbcShouldUploadArtifacts(branches: ["master"]) } @@ -199,8 +219,8 @@ pipeline { bbcGithubNotify(context: "pypi/upload", status: "PENDING") sh 'rm -rf dist/*' bbcMakeGlobalWheel("py27") - bbcMakeGlobalWheel("py3") - bbcTwineUpload(toxenv: "py3", pypi: true) + bbcMakeGlobalWheel("py36") + bbcTwineUpload(toxenv: "py36", pypi: true) script { env.pypiUpload_result = "SUCCESS" // This will only run if the steps above succeeded } @@ -211,6 +231,34 @@ pipeline { } } } + stage ("Upload to 
Artifactory") { + when { + anyOf { + expression { return params.FORCE_PYUPLOAD } + expression { + bbcShouldUploadArtifacts(branches: ["dev"]) + } + } + } + steps { + script { + env.artifactoryUpload_result = "FAILURE" + } + bbcGithubNotify(context: "artifactory/upload", status: "PENDING") + sh 'rm -rf dist/*' + bbcMakeGlobalWheel("py27") + bbcMakeGlobalWheel("py36") + bbcTwineUpload(toxenv: "py36", pypi: false) + script { + env.artifactoryUpload_result = "SUCCESS" // This will only run if the steps above succeeded + } + } + post { + always { + bbcGithubNotify(context: "artifactory/upload", status: env.artifactoryUpload_result) + } + } + } stage ("upload deb") { when { anyOf { @@ -248,7 +296,7 @@ pipeline { } post { always { - bbcSlackNotify(channel: "#apmm-cloudfit") + bbcSlackNotify(channel: "#apmm-cloudfit", branches: ["master", "dev"]) } } } diff --git a/MANIFEST.in b/MANIFEST.in index 55cbb0c..f455058 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -3,5 +3,6 @@ include tox.ini include COPYING recursive-include examples *.gsf recursive-include tests *.py +recursive-include mediagrains_py36 *.py include ICLA.md include LICENSE.md diff --git a/README.md b/README.md index 824a032..938a372 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ documentation for more details. ### Requirements -* A working Python 2.7 or Python 3.x installation +* A working Python 2.7 or Python 3.6+ installation * BBC R&D's internal deb repository set up as a source for apt (if installing via apt-get) * The tool [tox](https://tox.readthedocs.io/en/latest/) is needed to run the unittests, but not required to use the library. @@ -85,6 +85,8 @@ it with colour-bars: ... i += 1 ``` +(In python3.6+ a more natural interface for accessing data exists in the form of numpy arrays. See later.) 
+ The object grain can then be freely used for whatever video processing is desired, or it can be serialised into a GSF file as follows: @@ -159,6 +161,30 @@ between two grains, both as a printed string (as seen above) and also in a data-centric fashion as a tree structure which can be interrogated in code. +### Numpy arrays (Python 3.6+) + +In python 3.6 or higher an additional feature is provided in the form of numpy array access to the data in a grain. As such the above example of creating colourbars can be done more easily: + +```Python console +>>> from mediagrains.numpy import VideoGrain +>>> from uuid import uuid1 +>>> from mediagrains.cogenums import CogFrameFormat, CogFrameLayout +>>> src_id = uuid1() +>>> flow_id = uuid1() +>>> grain = VideoGrain(src_id, flow_id, cog_frame_format=CogFrameFormat.S16_422_10BIT, width=1920, height=1080) +>>> colours = [ +... (0x3FF, 0x000, 0x3FF), +... (0x3FF, 0x3FF, 0x000), +... (0x3FF, 0x000, 0x000), +... (0x3FF, 0x3FF, 0x3FF), +... (0x3FF, 0x200, 0x3FF), +... (0x3FF, 0x3FF, 0x200) ] +>>> for c in range(0, 3): +... for x in range(0, grain.components[c].width): +... for y in range(0, grain.components[c].height): +... grain.component_data[c][x, y] = colours[x*len(colours)//grain.components[c].width][c] +``` + ## Documentation The API is well documented in the docstrings of the module mediagrains, to view: diff --git a/mediagrains/asyncio.py b/mediagrains/asyncio.py new file mode 100644 index 0000000..2ad0429 --- /dev/null +++ b/mediagrains/asyncio.py @@ -0,0 +1,29 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""\ +Asyncio compatible layer for mediagrains, but only available in python 3.6+ +""" + +from sys import version_info + +if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + from mediagrains_py36.asyncio import AsyncGSFDecoder, AsyncLazyLoaderUnloadedError, loads # noqa: F401 + + __all__ = ["AsyncGSFDecoder", "AsyncLazyLoaderUnloadedError", "loads"] +else: + __all__ = [] diff --git a/mediagrains/cogenums.py b/mediagrains/cogenums.py index e885d1a..7b52cec 100644 --- a/mediagrains/cogenums.py +++ b/mediagrains/cogenums.py @@ -23,9 +23,20 @@ # that python code using it is compatible with this library when specifying # video and audio formats. 
-from enum import IntEnum +from enum import IntEnum, Enum -__all__ = ['CogFrameFormat', 'CogFrameLayout', 'CogAudioFormat', 'COG_FRAME_IS_PACKED', 'COG_FRAME_IS_COMPRESSED', 'COG_FRAME_FORMAT_BYTES_PER_VALUE'] +__all__ = [ + 'CogFrameFormat', + 'CogFrameLayout', + 'CogAudioFormat', + 'COG_FRAME_IS_PACKED', + 'COG_FRAME_IS_COMPRESSED', + 'COG_FRAME_IS_PLANAR', + 'COG_FRAME_IS_PLANAR_RGB', + 'COG_FRAME_FORMAT_BYTES_PER_VALUE', + 'COG_FRAME_FORMAT_H_SHIFT', + 'COG_FRAME_FORMAT_V_SHIFT', + 'COG_FRAME_FORMAT_ACTIVE_BITS'] class CogFrameFormat(IntEnum): @@ -42,6 +53,7 @@ class CogFrameFormat(IntEnum): U8_444 = 0x2000 U8_422 = 0x2001 U8_420 = 0x2003 + U8_444_RGB = 0x2010 ALPHA_U8 = 0x2080 YUYV = 0x2100 UYVY = 0x2101 @@ -56,20 +68,24 @@ class CogFrameFormat(IntEnum): BGRA = 0x2116 ABGR = 0x2117 S16_444_10BIT = 0x2804 + S16_444_10BIT_RGB = 0x2814 S16_422_10BIT = 0x2805 S16_420_10BIT = 0x2807 ALPHA_S16_10BIT = 0x2884 v210 = 0x2906 S16_444_12BIT = 0x3004 + S16_444_12BIT_RGB = 0x3014 S16_422_12BIT = 0x3005 S16_420_12BIT = 0x3007 ALPHA_S16_12BIT = 0x3084 S16_444 = 0x4004 + S16_444_RGB = 0x4014 S16_422 = 0x4005 S16_420 = 0x4007 ALPHA_S16 = 0x4084 v216 = 0x4105 S32_444 = 0x8008 + S32_444_RGB = 0x8018 S32_422 = 0x8009 S32_420 = 0x800b ALPHA_S32 = 0x8088 @@ -109,6 +125,22 @@ class CogAudioFormat(IntEnum): INVALID = 0xffffffff +class PlanarChromaFormat(IntEnum): + YUV_444 = 0x00 + YUV_422 = 0x01 + YUV_420 = 0x03 + RGB = 0x10 + + +def COG_PLANAR_FORMAT(chroma, depth): + if depth <= 8: + return CogFrameFormat(0 + chroma + (depth << 10)) + elif depth > 16: + return CogFrameFormat(8 + chroma + (depth << 10)) + else: + return CogFrameFormat(4 + chroma + (depth << 10)) + + def COG_FRAME_IS_PACKED(fmt): return ((fmt >> 8) & 0x1) != 0 @@ -117,6 +149,18 @@ def COG_FRAME_IS_COMPRESSED(fmt): return ((fmt >> 9) & 0x1) != 0 +def COG_FRAME_IS_PLANAR(fmt): + return ((fmt >> 8) & 0x3) == 0 + + +def COG_FRAME_IS_ALPHA(fmt): + return ((fmt >> 7) & 0x1) != 0 + + +def COG_FRAME_IS_PLANAR_RGB(fmt): + 
return ((fmt >> 4) & 0x31) == 1 + + def COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt): if ((fmt) & 0xc) == 0: return 1 @@ -124,3 +168,15 @@ def COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt): return 2 else: return 4 + + +def COG_FRAME_FORMAT_H_SHIFT(fmt): + return (fmt & 0x1) + + +def COG_FRAME_FORMAT_V_SHIFT(fmt): + return ((fmt >> 1) & 0x1) + + +def COG_FRAME_FORMAT_ACTIVE_BITS(fmt): + return (((int(fmt)) >> 10) & 0x3F) diff --git a/mediagrains/comparison/__init__.py b/mediagrains/comparison/__init__.py index cd81829..ac3f15e 100644 --- a/mediagrains/comparison/__init__.py +++ b/mediagrains/comparison/__init__.py @@ -29,8 +29,9 @@ from __future__ import absolute_import from ._internal import GrainComparisonResult, GrainIteratorComparisonResult +from .psnr import compute_psnr -__all__ = ["compare_grain"] +__all__ = ["compare_grain", "compute_psnr"] # diff --git a/mediagrains/comparison/_internal.py b/mediagrains/comparison/_internal.py index 86b41b2..018ba38 100644 --- a/mediagrains/comparison/_internal.py +++ b/mediagrains/comparison/_internal.py @@ -27,7 +27,8 @@ from ..cogenums import CogAudioFormat, CogFrameFormat, COG_FRAME_IS_PACKED, COG_FRAME_IS_COMPRESSED, COG_FRAME_FORMAT_BYTES_PER_VALUE -from .options import Exclude, Include, ComparisonExclude, ComparisonExpectDifferenceMatches +from .options import Exclude, Include, ComparisonExclude, ComparisonExpectDifferenceMatches, ComparisonPSNR +from .psnr import compute_psnr # @@ -353,6 +354,40 @@ def __init__(self, identifier, a, b, expected_difference=TimeOffset(0), **kwargs super(TimestampDifferanceComparisonResult, self).__init__(identifier, a, b, expected_difference=expected_difference, **kwargs) +class PSNRComparisonResult(ComparisonResult): + def __init__(self, identifier, a, b, **kwargs): + """Compute the PSNR for two grains and compare the result with the expected values and comparison operator. 
+ + :param identifier: The path in the grain structure + :param a: A GRAIN + :param b: Another GRAIN + :param kwargs: Other named arguments + """ + super(PSNRComparisonResult, self).__init__(identifier, a, b, **kwargs) + + def compare(self, a, b): + opts = [option for option in self._options if isinstance(option, ComparisonPSNR) and self.identifier == option.path] + + if self.excluded(): + return (False, "For speed reasons not comparing {} and {} when this would be excluded".format(self._identifier.format('a'), + self._identifier.format('b')), []) + + try: + psnr = compute_psnr(a, b) + except NotImplementedError: + return (False, "Grain is not supported for PSNR comparison of {} and {}".format(self._identifier.format('a'), + self._identifier.format('b')), []) + + if all(opt.matcher(psnr) for opt in opts): + return (True, "PSNR({}, {}) == {!r}, meets requirements set in options".format(self._identifier.format('a'), + self._identifier.format('b'), + psnr), []) + else: + return (False, "PSNR({}, {}) == {!r}, does not meet requirements set in options".format(self._identifier.format('a'), + self._identifier.format('b'), + psnr), []) + + class AOnlyComparisonResult(ComparisonResult): def __init__(self, identifier, a, **kwargs): super(AOnlyComparisonResult, self).__init__(identifier, a, None, **kwargs) @@ -594,29 +629,37 @@ def compare(self, a, b): children[key] = EqualityComparisonResult(path, getattr(a, key), getattr(b, key), options=self._options, attr=key) if a.format == b.format: - if COG_FRAME_IS_COMPRESSED(a.format): - wc = 'B' - elif a.format == CogFrameFormat.v210: - wc = 'I' - elif a.format == CogFrameFormat.v216: - wc = 'H' - elif COG_FRAME_IS_PACKED(a.format): - wc = 'B' + path = self._identifier + '.data' + compare_psnr = len([option for option in self._options if isinstance(option, ComparisonPSNR) and path == option.path]) != 0 + if compare_psnr: + children['data'] = PSNRComparisonResult(path, + a, + b, + options=self._options) else: - if 
COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 1: + if COG_FRAME_IS_COMPRESSED(a.format): wc = 'B' - elif COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 2: - wc = 'H' - elif COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 4: + elif a.format == CogFrameFormat.v210: wc = 'I' - - children['data'] = DataEqualityComparisonResult(self._identifier + ".data", - a.data, - b.data, - options=self._options, - attr="data", - alignment="@", - word_code=wc) + elif a.format == CogFrameFormat.v216: + wc = 'H' + elif COG_FRAME_IS_PACKED(a.format): + wc = 'B' + else: + if COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 1: + wc = 'B' + elif COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 2: + wc = 'H' + elif COG_FRAME_FORMAT_BYTES_PER_VALUE(a.format) == 4: + wc = 'I' + + children['data'] = DataEqualityComparisonResult(self._identifier + ".data", + a.data, + b.data, + options=self._options, + attr="data", + alignment="@", + word_code=wc) else: self._options.append(Exclude.data) children['data'] = FailingComparisonResult(self._identifier + ".data", diff --git a/mediagrains/comparison/options.py b/mediagrains/comparison/options.py index 1213b5a..c0ff9be 100644 --- a/mediagrains/comparison/options.py +++ b/mediagrains/comparison/options.py @@ -44,7 +44,7 @@ from __future__ import absolute_import -__all__ = ["Exclude", "Include", "ExpectedDifference", "CompareOnlyMetadata"] +__all__ = ["Exclude", "Include", "ExpectedDifference", "CompareOnlyMetadata", "PSNR"] # @@ -108,6 +108,53 @@ def __getattr__(self, attr): return _ExpectedDifference(self.path + "." 
+ attr) +class _PSNR(object): + def __init__(self, path): + self.path = path + + def __repr__(self): + return self.path.format("PSNR") + + def __lt__(self, other): + def _compare_psnr(x, other): + for comp_x, comp_other in zip(x, other): + if comp_other is not None and not comp_x < comp_other: + return False + return True + + return ComparisonPSNR(self.path, lambda x: _compare_psnr(x, other), "{} < {!r}".format('PSNR', other)) + + def __le__(self, other): + def _compare_psnr(x, other): + for comp_x, comp_other in zip(x, other): + if comp_other is not None and not comp_x <= comp_other: + return False + return True + + return ComparisonPSNR(self.path, lambda x: _compare_psnr(x, other), "{} <= {!r}".format('PSNR', other)) + + def __gt__(self, other): + def _compare_psnr(x, other): + for comp_x, comp_other in zip(x, other): + if comp_other is not None and not comp_x > comp_other: + return False + return True + + return ComparisonPSNR(self.path, lambda x: _compare_psnr(x, other), "{} > {!r}".format('PSNR', other)) + + def __ge__(self, other): + def _compare_psnr(x, other): + for comp_x, comp_other in zip(x, other): + if comp_other is not None and not comp_x >= comp_other: + return False + return True + + return ComparisonPSNR(self.path, lambda x: _compare_psnr(x, other), "{} >= {!r}".format('PSNR', other)) + + def __getattr__(self, attr): + return _PSNR(self.path + "." 
+ attr) + + class ComparisonOption(object): def __init__(self, path): self.path = path @@ -142,6 +189,16 @@ def __repr__(self): return self._repr +class ComparisonPSNR(ComparisonOption): + def __init__(self, path, matcher, _repr): + self.matcher = matcher + self._repr = _repr + super(ComparisonPSNR, self).__init__(path) + + def __repr__(self): + return self._repr + + Exclude = _Exclude() @@ -152,3 +209,6 @@ def __repr__(self): CompareOnlyMetadata = Exclude.data + + +PSNR = _PSNR("{}") diff --git a/mediagrains/comparison/psnr.py b/mediagrains/comparison/psnr.py new file mode 100644 index 0000000..ae633e8 --- /dev/null +++ b/mediagrains/comparison/psnr.py @@ -0,0 +1,116 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from __future__ import print_function +from __future__ import absolute_import + +from sys import version_info + +if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + from mediagrains_py36.psnr import compute_psnr + + __all__ = ["compute_psnr"] + +else: + import math + import numpy as np + + from ..cogenums import COG_FRAME_FORMAT_BYTES_PER_VALUE, COG_FRAME_FORMAT_ACTIVE_BITS + from ..cogenums import COG_FRAME_IS_COMPRESSED, COG_FRAME_IS_PACKED + + __all__ = ["compute_psnr"] + + + def _compute_comp_mse(format, data_a, comp_a, data_b, comp_b): + """Compute MSE (Mean Squared Error) for video component. + + Currently supports planar components only. 
+ + :param format: The COG format + :param data_a: Data bytes for GRAIN component a + :param comp_a: COMPONENT for GRAIN a + :param data_b: Data bytes for GRAIN component b + :param comp_b: COMPONENT for GRAIN b + :returns: The MSE value + """ + if COG_FRAME_IS_PACKED(format): + raise NotImplementedError("Packed video format is not supported in this version of python") + + bpp = COG_FRAME_FORMAT_BYTES_PER_VALUE(format) + if bpp == 1: + dtype = np.uint8 + elif bpp == 2: + dtype = np.uint16 + elif bpp == 4: + dtype = np.uint32 + + total = 0 + for y in range(0, comp_a.height): + line_a = data_a[y*comp_a.stride + comp_a.offset:y*comp_a.stride + comp_a.offset + comp_a.width*bpp] + line_b = data_b[y*comp_b.stride + comp_b.offset:y*comp_b.stride + comp_b.offset + comp_b.width*bpp] + np_line_a = np.frombuffer(line_a, dtype=dtype) + np_line_b = np.frombuffer(line_b, dtype=dtype) + total += np.sum(np.square(np.subtract(np_line_a, np_line_b))) + + return total / (comp_a.width*comp_a.height) + + + def _compute_comp_psnr(format, data_a, comp_a, data_b, comp_b, max_val): + """Compute PSNR for video component. + + Currently supports planar components only. + + :param format: The COG format + :param data_a: Data bytes for GRAIN component a + :param comp_a: COMPONENT for GRAIN a + :param data_b: Data bytes for GRAIN component b + :param comp_b: COMPONENT for GRAIN b + :param max_val: Maximum value for a component pixel + :returns: The PSNR + """ + mse = _compute_comp_mse(format, data_a, comp_a, data_b, comp_b) + if mse == 0: + return float('Inf') + else: + return 10.0 * math.log10((max_val**2)/mse) + + + def compute_psnr(grain_a, grain_b): + """Compute PSNR for video grains. 
+ + :param grain_a: A video GRAIN + :param grain_b: A video GRAIN + :returns: A list of PSNR value for each video component + """ + if grain_a.grain_type != grain_b.grain_type or grain_a.grain_type != "video": + raise AttributeError("Invalid grain types") + if grain_a.width != grain_b.width or grain_a.height != grain_b.height: + raise AttributeError("Frame dimensions differ") + + if grain_a.format != grain_b.format: + raise NotImplementedError("Different grain formats not supported") + if COG_FRAME_IS_COMPRESSED(grain_a.format): + raise NotImplementedError("Compressed video is not supported") + + psnr = [] + data_a = bytes(grain_a.data) + data_b = bytes(grain_b.data) + max_val = (1 << COG_FRAME_FORMAT_ACTIVE_BITS(grain_a.format)) - 1 + for comp_a, comp_b in zip(grain_a.components, grain_b.components): + psnr.append(_compute_comp_psnr(grain_a.format, data_a, comp_a, data_b, comp_b, max_val)) + + return psnr diff --git a/mediagrains/grain.py b/mediagrains/grain.py index bd3fd78..f3363dc 100644 --- a/mediagrains/grain.py +++ b/mediagrains/grain.py @@ -27,7 +27,7 @@ from six import string_types from uuid import UUID -from mediatimestamp.immutable import Timestamp, TimeOffset +from mediatimestamp.immutable import Timestamp, TimeOffset, TimeRange from collections import Sequence, MutableSequence, Mapping from fractions import Fraction from copy import copy, deepcopy @@ -120,12 +120,19 @@ class GRAIN(Sequence): How long the data would be expected to be based on what's listed in the metadata -In addition there is a method provided for convenience: +In addition these methods are provided for convenience: final_origin_timestamp() The origin timestamp of the final sample in the grain. For most grain types this is the same as origin_timestamp, but not for audio grains. + +origin_timerange() + The origin time range covered by the samples in the grain. 
+ +normalise_time(value) + Returns a normalised Timestamp, TimeOffset or TimeRange using the video frame rate or audio sample rate. + """ def __init__(self, meta, data): self.meta = meta @@ -202,6 +209,9 @@ def __deepcopy__(self, memo): from .grain_constructors import Grain return Grain(deepcopy(self.meta), deepcopy(self.data)) + def __bytes__(self): + return bytes(self._data) + @property def data(self): return self._data @@ -247,6 +257,12 @@ def origin_timestamp(self, value): def final_origin_timestamp(self): return self.origin_timestamp + def origin_timerange(self): + return TimeRange(self.origin_timestamp, self.final_origin_timestamp(), TimeRange.INCLUSIVE) + + def normalise_time(self, value): + return value + @property def sync_timestamp(self): return Timestamp.from_tai_sec_nsec(self.meta['grain']['sync_timestamp']) @@ -849,16 +865,16 @@ def __init__(self, parent): self.parent = parent def __getitem__(self, key): - return VIDEOGRAIN.COMPONENT(self.parent.meta['grain']['cog_frame']['components'][key]) + return type(self.parent).COMPONENT(self.parent.meta['grain']['cog_frame']['components'][key]) def __setitem__(self, key, value): - self.parent.meta['grain']['cog_frame']['components'][key] = VIDEOGRAIN.COMPONENT(value) + self.parent.meta['grain']['cog_frame']['components'][key] = type(self.parent).COMPONENT(value) def __delitem__(self, key): del self.parent.meta['grain']['cog_frame']['components'][key] def insert(self, key, value): - self.parent.meta['grain']['cog_frame']['components'].insert(key, VIDEOGRAIN.COMPONENT(value)) + self.parent.meta['grain']['cog_frame']['components'].insert(key, type(self.parent).COMPONENT(value)) def __len__(self): return len(self.parent.meta['grain']['cog_frame']['components']) @@ -886,6 +902,11 @@ def __init__(self, meta, data): self.meta['grain']['cog_frame']['layout'] = int(self.meta['grain']['cog_frame']['layout']) self.components = VIDEOGRAIN.COMPONENT_LIST(self) + def normalise_time(self, value): + if self.rate == 0: + return 
value + return value.normalise(self.rate.numerator, self.rate.denominator) + @property def format(self): return CogFrameFormat(self.meta['grain']['cog_frame']['format']) @@ -1064,6 +1085,11 @@ def __init__(self, meta, data): self.meta['grain']['cog_coded_frame']['format'] = int(self.meta['grain']['cog_coded_frame']['format']) self.meta['grain']['cog_coded_frame']['layout'] = int(self.meta['grain']['cog_coded_frame']['layout']) + def normalise_time(self, value): + if self.rate == 0: + return value + return value.normalise(self.rate.numerator, self.rate.denominator) + @property def format(self): return CogFrameFormat(self.meta['grain']['cog_coded_frame']['format']) @@ -1293,6 +1319,9 @@ def __init__(self, meta, data): def final_origin_timestamp(self): return (self.origin_timestamp + TimeOffset.from_count(self.samples - 1, self.sample_rate, 1)) + def normalise_time(self, value): + return value.normalise(self.sample_rate, 1) + @property def format(self): return CogAudioFormat(self.meta['grain']['cog_audio']['format']) @@ -1427,6 +1456,9 @@ def __init__(self, meta, data): def final_origin_timestamp(self): return (self.origin_timestamp + TimeOffset.from_count(self.samples - 1, self.sample_rate, 1)) + def normalise_time(self, value): + return value.normalise(self.sample_rate, 1) + @property def format(self): return CogAudioFormat(self.meta['grain']['cog_coded_audio']['format']) diff --git a/mediagrains/numpy.py b/mediagrains/numpy.py new file mode 100644 index 0000000..725983c --- /dev/null +++ b/mediagrains/numpy.py @@ -0,0 +1,29 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""\ +Numpy compatible layer for mediagrains, but only available in python 3.6+ +""" + +from sys import version_info + +if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + from mediagrains_py36.numpy import VideoGrain, VIDEOGRAIN # noqa: F401 + + __all__ = ['VideoGrain', 'VIDEOGRAIN'] +else: + __all__ = [] diff --git a/mediagrains/testsignalgenerator.py b/mediagrains/testsignalgenerator.py index f4618e2..fc423a5 100644 --- a/mediagrains/testsignalgenerator.py +++ b/mediagrains/testsignalgenerator.py @@ -32,25 +32,25 @@ from . import VideoGrain, AudioGrain from .cogenums import CogFrameFormat, CogFrameLayout, CogAudioFormat -__all__ = ["LumaSteps", "Tone1K", "Tone", "Silence"] +__all__ = ["LumaSteps", "Tone1K", "Tone", "Silence", "ColourBars", "MovingBarOverlay"] # information about formats # in the order: -# (num_bytes_per_sample, (offset, range), (offset, range), (offset, range)) +# (num_bytes_per_sample, (offset, range), (offset, range), (offset, range), active_bits_per_sample) # in YUV order pixel_ranges = { - CogFrameFormat.U8_444: (1, (16, 235-16), (128, 224), (128, 224)), - CogFrameFormat.U8_422: (1, (16, 235-16), (128, 224), (128, 224)), - CogFrameFormat.U8_420: (1, (16, 235-16), (128, 224), (128, 224)), - CogFrameFormat.S16_444_10BIT: (2, (64, 940-64), (512, 896), (512, 896)), - CogFrameFormat.S16_422_10BIT: (2, (64, 940-64), (512, 896), (512, 896)), - CogFrameFormat.S16_420_10BIT: (2, (64, 940-64), (512, 896), (512, 896)), - CogFrameFormat.S16_444_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584)), - 
CogFrameFormat.S16_422_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584)), - CogFrameFormat.S16_420_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584)), - CogFrameFormat.S16_444: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344)), - CogFrameFormat.S16_422: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344)), - CogFrameFormat.S16_420: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344)), + CogFrameFormat.U8_444: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.U8_422: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.U8_420: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.S16_444_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_422_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_420_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_444_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_422_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_420_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_444: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), + CogFrameFormat.S16_422: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), + CogFrameFormat.S16_420: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), } @@ -124,6 +124,127 @@ def LumaSteps(src_id, flow_id, width, height, vg.sync_timestamp = vg.origin_timestamp + +def ColourBars(src_id, flow_id, width, height, + intensity=0.75, + rate=Fraction(25, 1), + origin_timestamp=None, + cog_frame_format=CogFrameFormat.U8_444, + step=1): + """Returns a generator for colour bar video grains in specified format. 
+ :param src_id: source_id for grains + :param flow_id: flow_id for grains + :param width: width of grains + :param height: height of grains + :param intensity: intensity of colour bars (usually 1.0 or 0.75) + :param rate: rate of grains + :param origin_timestamp: the origin timestamp of the first grain. + :param step: The number of grains to increment by each time (values above 1 cause skipping)""" + + if cog_frame_format not in pixel_ranges: + raise ValueError("Not a supported format for this generator") + + _bpp = pixel_ranges[cog_frame_format][0] + _steps = 8 + bs = 16 - pixel_ranges[cog_frame_format][4] + + values = [ + (int((0xFFFF >> bs) * intensity), 0x8000 >> bs, 0x8000 >> bs), + (int((0xE1FF >> bs) * intensity), 0x0000 >> bs, 0x9400 >> bs), + (int((0xB200 >> bs) * intensity), 0xABFF >> bs, 0x0000 >> bs), + (int((0x95FF >> bs) * intensity), 0x2BFF >> bs, 0x15FF >> bs), + (int((0x69FF >> bs) * intensity), 0xD400 >> bs, 0xEA00 >> bs), + (int((0x4C00 >> bs) * intensity), 0x5400 >> bs, 0xFFFF >> bs), + (int((0x1DFF >> bs) * intensity), 0xFFFF >> bs, 0x6BFF >> bs), + (int((0x0000 >> bs) * intensity), 0x8000 >> bs, 0x8000 >> bs)] + + vg = VideoGrain(src_id, flow_id, origin_timestamp=origin_timestamp, + rate=rate, + cog_frame_format=cog_frame_format, + cog_frame_layout=CogFrameLayout.FULL_FRAME, + width=width, + height=height) + + lines = [bytearray(vg.components[0].width*_bpp), bytearray(vg.components[1].width*_bpp), bytearray(vg.components[2].width*_bpp)] + for c in range(0, 3): + for x in range(0, vg.components[c].width): + pos = x//(vg.components[c].width//_steps) + if _bpp == 1: + lines[c][x] = values[pos][c] + elif _bpp == 2: + lines[c][2*x + 0] = values[pos][c] & 0xFF + lines[c][2*x + 1] = (values[pos][c] >> 8) & 0xFF + + + for c in range(0, 3): + for y in range(0, vg.components[c].height): + vg.data[vg.components[c].offset + y*vg.components[c].stride:vg.components[c].offset + y*vg.components[c].stride + vg.components[c].width*_bpp] = lines[c] + + 
origin_timestamp = vg.origin_timestamp
+    count = 0
+    while True:
+        yield deepcopy(vg)
+        count += step
+        vg.origin_timestamp = origin_timestamp + TimeOffset.from_count(count,
+                                                                       rate.numerator, rate.denominator)
+        vg.sync_timestamp = vg.origin_timestamp
+
+
+def MovingBarOverlay(grain_gen, height=100, speed=1.0):
+    """Call this method and pass an iterable of video grains as the first parameter. This method will overlay a moving black bar onto the grains.
+
+    :param grain_gen: An iterable which yields video grains
+    :param height: The height of the bar in pixels
+    :param speed: A floating point speed in pixels per frame
+
+    :returns: A generator which yields video grains
+    """
+    bar = None
+    for grain in grain_gen:
+        v_subs = (grain.components[0].height + grain.components[1].height - 1)//grain.components[1].height
+
+        if bar is None:
+            if grain.format not in pixel_ranges:
+                raise ValueError("Not a supported format for this generator")
+
+            _bpp = pixel_ranges[grain.format][0]
+
+            bar = [bytearray(grain.components[0].width*_bpp * height), bytearray(grain.components[1].width*_bpp * height // v_subs), bytearray(grain.components[2].width*_bpp * height // v_subs)]
+            for y in range(0, height):
+                for x in range(0, grain.components[0].width):
+                    bar[0][y*grain.components[0].width * _bpp + _bpp*x + 0] = pixel_ranges[grain.format][1][0] & 0xFF
+                    if _bpp > 1:
+                        bar[0][y*grain.components[0].width * _bpp + _bpp*x + 1] = pixel_ranges[grain.format][1][0] >> 8
+            for y in range(0, height // v_subs):
+                for x in range(0, grain.components[1].width):
+                    bar[1][y*grain.components[1].width * _bpp + _bpp*x + 0] = pixel_ranges[grain.format][2][0] & 0xFF
+                    if _bpp > 1:
+                        bar[1][y*grain.components[1].width * _bpp + _bpp*x + 1] = pixel_ranges[grain.format][2][0] >> 8
+                    bar[2][y*grain.components[2].width * _bpp + _bpp*x + 0] = pixel_ranges[grain.format][3][0] & 0xFF
+                    if _bpp > 1:
+                        bar[2][y*grain.components[2].width * _bpp + _bpp*x + 1] = pixel_ranges[grain.format][3][0] >> 8
+
+        fnum = 
int(speed*grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator)) + + for y in range(0, height): + grain.data[ + grain.components[0].offset + ((fnum + y) % grain.components[0].height)*grain.components[0].stride: + grain.components[0].offset + ((fnum + y) % grain.components[0].height)*grain.components[0].stride + grain.components[0].width*_bpp ] = ( + bar[0][y*grain.components[0].width * _bpp: (y+1)*grain.components[0].width * _bpp]) + for y in range(0, height // v_subs): + grain.data[ + grain.components[1].offset + ((fnum//v_subs + y) % grain.components[1].height)*grain.components[1].stride: + grain.components[1].offset + ((fnum//v_subs + y) % grain.components[1].height)*grain.components[1].stride + grain.components[1].width*_bpp ] = ( + bar[1][y*grain.components[1].width * _bpp: (y+1)*grain.components[1].width * _bpp]) + grain.data[ + grain.components[2].offset + ((fnum//v_subs + y) % grain.components[2].height)*grain.components[2].stride: + grain.components[2].offset + ((fnum//v_subs + y) % grain.components[2].height)*grain.components[2].stride + grain.components[2].width*_bpp ] = ( + bar[2][y*grain.components[2].width * _bpp: (y+1)*grain.components[2].width * _bpp]) + + + yield grain + + def Tone1K(src_id, flow_id, samples=1920, channels=1, diff --git a/mediagrains_py36/__init__.py b/mediagrains_py36/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mediagrains_py36/asyncio/__init__.py b/mediagrains_py36/asyncio/__init__.py new file mode 100644 index 0000000..9d8be9f --- /dev/null +++ b/mediagrains_py36/asyncio/__init__.py @@ -0,0 +1,606 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""\
+Library for handling mediagrains in pure python asyncio compatibility layer.
+"""
+
+import asyncio
+
+from uuid import UUID
+from os import SEEK_SET
+from datetime import datetime
+from fractions import Fraction
+
+from mediatimestamp.immutable import Timestamp
+
+from .aiobytes import AsyncIOBytes, AsyncLazyLoaderUnloadedError
+from .bytesaio import BytesAIO
+
+from mediagrains import Grain
+from mediagrains.gsf import GSFDecodeBadVersionError, GSFDecodeBadFileTypeError, GSFDecodeError
+
+__all__ = ["AsyncGSFDecoder", "AsyncLazyLoaderUnloadedError", "loads"]
+
+
+async def loads(s, cls=None, parse_grain=None, **kwargs):
+    """Deserialise a GSF file from a string (or similar) into python,
+    returns a pair of (head, segments) where head is a python dict
+    containing general metadata from the file, and segments is a dictionary
+    mapping numeric segment ids to lists of Grain objects.
+
+    If you wish to use a custom AsyncGSFDecoder subclass pass it as cls, if you
+    wish to use a custom Grain constructor pass it as parse_grain. The
+    defaults are AsyncGSFDecoder and Grain. Extra kwargs will be passed to the
+    decoder constructor.
+
+    The custom parse_grain method can be an asynchronous coroutine or a synchronous callable.
+ + There is no real benefit to using this over the synchronous version, since access to an in-memory buffer is + always going to be synchronous, but this can be used for convenience where you don't want multiple code paths + for synchronous and asynchronous code.""" + if cls is None: + cls = AsyncGSFDecoder + if parse_grain is None: + parse_grain = Grain + dec = cls(BytesAIO(s), parse_grain=parse_grain, **kwargs) + return await dec.decode() + + +class AsyncGSFBlock(): + """A single block in a GSF file, accessed asynchronously + + Has coroutines to read various types from the block. + Must be used as an asynchronous context manager, which will automatically decode the block tag and size, + exposed by the `tag` and `size` attributes. + """ + def __init__(self, file_data, want_tag=None, raise_on_wrong_tag=False): + """Constructor. Unlike the synchronous version does not record the start byte of the block in `block_start` + + :param file_data: An asynchronous readable file-like object positioned at the start of the block + :param want_tag: If set to a tag string, and in a context manager, skip any block without that tag + :param raise_on_wrong_tag: Set to True to raise a GSFDecodeError if the next block isn't `want_tag` + """ + self.file_data = file_data + self.want_tag = want_tag + self.raise_on_wrong_tag = raise_on_wrong_tag + + self.size = None + self.block_start = None + + async def __aenter__(self): + """When used as a context manager record file position and read block size and tag on entry + + - When entering a block, tag and size should be read + - If tag doesn't decode, a GSFDecodeError should be raised + - If want_tag was supplied to the constructor, skip blocks that don't have that tag + - Unless raise_on_wrong_tag was also supplied, in which case raise + + :returns: Instance of AsyncGSFBlock + :raises GSFDecodeError: If the block tag failed to decode as UTF-8, or an unwanted tag was found""" + + self.block_start = await self.file_data.tell() # In binary 
mode, this should always be in bytes + + while True: + tag_bytes = await self.file_data.read(4) + + try: + self.tag = tag_bytes.decode(encoding="utf-8") + except UnicodeDecodeError: + raise GSFDecodeError( + "Bytes {!r} at location {} do not make a valid tag for a block".format(tag_bytes, self.block_start), + self.block_start + ) + + self.size = await self.read_uint(4) + + if self.want_tag is None or self.tag == self.want_tag: + return self + elif self.tag != self.want_tag and self.raise_on_wrong_tag: + raise GSFDecodeError("Wanted tag {} but got {} at {}".format(self.want_tag, self.tag, self.block_start), + self.block_start) + else: + await self.file_data.seek(self.block_start + self.size, SEEK_SET) + self.block_start = await self.file_data.tell() + + async def __aexit__(self, *args): + """When used as a context manager, exiting context should seek to the block end""" + await self.file_data.seek(self.block_start + self.size, SEEK_SET) + + async def has_child_block(self, strict_blocks=True): + """Checks if there is space for another child block in this block + + Returns true if there is space for another child block (i.e. >= 8 bytes) in this block. + If strict_blocks=True, this block only contains other blocks rather than any other data. As a result, if there + are bytes left, but not enough for another block, raise a GSFDecodeError. + Must be used in a context manager. 
+ + :param strict_blocks: Set to True to raise if a partial block is found + :returns: True if there is spaces for another block + :raises GSFDecodeError: If there is a partial block and strict=True + """ + assert self.size is not None, "has_child_block() only works in a context manager" + + bytes_remaining = await self.get_remaining() + if bytes_remaining >= 8: + return True + elif bytes_remaining != 0 and strict_blocks: + position = await self.file_data.tell() + raise GSFDecodeError("Found a partial block (or parent too small) in '{}' at {}".format(self.tag, position), + position) + else: + return False + + async def child_blocks(self, strict_blocks=True): + """Asynchronous generator for each child block - each yielded block sits within the context manager + + Must be used in a context manager. + + :param strict_blocks: Set to True to raise if a partial block is found + :yields: GSFBlock for child (already acting as a context manager) + :raises GSFDecodeError: If there is a partial block and strict=True + """ + while await self.has_child_block(strict_blocks=strict_blocks): + async with AsyncGSFBlock(self.file_data) as child_block: + yield child_block + + async def get_remaining(self): + """Get the number of bytes left in this block + + Only works in a context manager, will raise an AssertionError if not + + :returns: Number of bytes left in the block + """ + assert self.size is not None, "get_remaining() only works in a context manager" + return (self.block_start + self.size) - await self.file_data.tell() + + async def read_uint(self, length): + """Read an unsigned integer of length `length` + + :param length: Number of bytes used to store the integer + :returns: Unsigned integer + :raises EOFError: If there are fewer than `length` bytes left in the source + """ + r = 0 + uint_bytes = bytes(await self.file_data.read(length)) + + if len(uint_bytes) != length: + raise EOFError("Unable to read enough bytes from source") + + for n in range(0, length): + r += 
(uint_bytes[n] << (n*8)) + return r + + async def read_bool(self): + """Read a boolean value + + :returns: Boolean value + :raises EOFError: If there are no more bytes left in the source""" + n = await self.read_uint(1) + return (n != 0) + + async def read_sint(self, length): + """Read a 2's complement signed integer + + :param length: Number of bytes used to store the integer + :returns: Signed integer + :raises EOFError: If there are fewer than `length` bytes left in the source + """ + r = await self.read_uint(length) + if (r >> ((8*length) - 1)) == 1: + r -= (1 << (8*length)) + return r + + async def read_string(self, length): + """Read a fixed-length string, treating it as UTF-8 + + :param length: Number of bytes in the string + :returns: String + :raises EOFError: If there are fewer than `length` bytes left in the source + """ + string_data = await self.file_data.read(length) + if (len(string_data) != length): + raise EOFError("Unable to read enough bytes from source") + + return string_data.decode(encoding='utf-8') + + async def read_varstring(self): + """Read a variable length string + + Reads a 2 byte uint to get the string length, then reads a string of that length + + :returns: String + :raises EOFError: If there are too few bytes left in the source + """ + length = await self.read_uint(2) + return await self.read_string(length) + + async def read_uuid(self): + """Read a UUID + + :returns: UUID + :raises EOFError: If there are fewer than l bytes left in the source + """ + uuid_data = await self.file_data.read(16) + + if (len(uuid_data) != 16): + raise EOFError("Unable to read enough bytes from source") + + return UUID(bytes=uuid_data) + + async def read_timestamp(self): + """Read a date-time (with seconds resolution) stored in 7 bytes + + :returns: Datetime + :raises EOFError: If there are fewer than 7 bytes left in the source + """ + year = await self.read_sint(2) + month = await self.read_uint(1) + day = await self.read_uint(1) + hour = await 
self.read_uint(1) + minute = await self.read_uint(1) + second = await self.read_uint(1) + return datetime(year, month, day, hour, minute, second) + + async def read_ippts(self): + """Read a mediatimestamp.Timestamp + + :returns: Timestamp + :raises EOFError: If there are fewer than 10 bytes left in the source + """ + secs = await self.read_uint(6) + nano = await self.read_uint(4) + return Timestamp(secs, nano) + + async def read_rational(self): + """Read a rational (fraction) + + If numerator or denominator is 0, returns Fraction(0) + + :returns: fraction.Fraction + :raises EOFError: If there are fewer than 8 bytes left in the source + """ + numerator = await self.read_uint(4) + denominator = await self.read_uint(4) + if numerator == 0 or denominator == 0: + return Fraction(0) + else: + return Fraction(numerator, denominator) + + +def asynchronise(f): + async def __inner(*args, **kwargs): + return f(*args, **kwargs) + return __inner + + +class AsyncGSFDecoder(object): + """A decoder for GSF format that operates asynchronously. + + Provides coroutines to decode the header of a GSF file, followed by an asynchronous generator to get each grain, + wrapped in some grain method (mediagrains.Grain by default.) 
+ """ + def __init__(self, file_data, parse_grain=Grain, **kwargs): + """Constructor + + :param parse_grain: Function or coroutine that takes a (metadata dict, buffer) and returns a grain + representation + :param file_data: A readable asynchronous file io-like object similar to those provided by aiofiles + """ + self.Grain = parse_grain + if not asyncio.iscoroutine(self.Grain): + self.Grain = asynchronise(self.Grain) + self.file_data = file_data + self.head = None + self.start_loc = None + + async def _decode_ssb_header(self): + """Find and read the SSB header in the GSF file + + :returns: (major, minor) version tuple + :raises GSFDecodeBadFileTypeError: If the SSB tag shows this isn't a GSF file + """ + + ssb_block = AsyncGSFBlock(self.file_data) + ssb_block.block_start = await self.file_data.tell() + tag = await ssb_block.read_string(8) + + if tag != "SSBBgrsg": + raise GSFDecodeBadFileTypeError("File lacks correct header", ssb_block.block_start, tag) + + major = await ssb_block.read_uint(2) + minor = await ssb_block.read_uint(2) + + return (major, minor) + + async def _decode_head(self, head_block): + """Decode the "head" block and extract ID, created date, segments and tags + + :param head_block: AsyncGSFBlock representing the "head" block + :returns: Head block as a dict + """ + head = {} + head['id'] = await head_block.read_uuid() + head['created'] = await head_block.read_timestamp() + + head['segments'] = [] + head['tags'] = [] + + # Read head block children + async for head_child in head_block.child_blocks(): + # Parse a segment block + if head_child.tag == "segm": + segm = {} + segm['local_id'] = await head_child.read_uint(2) + segm['id'] = await head_child.read_uuid() + segm['count'] = await head_child.read_sint(8) + segm['tags'] = [] + + # Segment blocks can have child tags as well + while await head_child.has_child_block(): + async with AsyncGSFBlock(self.file_data) as segm_tag: + if segm_tag.tag == "tag ": + key = await segm_tag.read_varstring() + 
value = await segm_tag.read_varstring()
+                            segm['tags'].append((key, value))
+
+                head['segments'].append(segm)
+
+            # Parse a tag block
+            elif head_child.tag == "tag ":
+                key = await head_child.read_varstring()
+                value = await head_child.read_varstring()
+                head['tags'].append((key, value))
+
+        return head
+
+    async def _decode_tils(self, tils_block):
+        """Decode timelabels (tils) block
+
+        :param tils_block: Instance of AsyncGSFBlock() representing a "gbhd" block
+        :returns: tils block as a dict
+        """
+        tils = []
+        timelabel_count = await tils_block.read_uint(2)
+        for i in range(0, timelabel_count):
+            tag = await tils_block.read_string(16)
+            tag = tag.strip("\x00")
+            count = await tils_block.read_uint(4)
+            rate = await tils_block.read_rational()
+            drop = await tils_block.read_bool()
+
+            tils.append({'tag': tag,
+                         'timelabel': {'frames_since_midnight': count,
+                                       'frame_rate_numerator': rate.numerator,
+                                       'frame_rate_denominator': rate.denominator,
+                                       'drop_frame': drop}})
+
+        return tils
+
+    async def _decode_gbhd(self, gbhd_block):
+        """Decode grain block header ("gbhd") to get grain metadata
+
+        :param gbhd_block: Instance of AsyncGSFBlock() representing a "gbhd" block
+        :returns: Grain data dict
+        :raises GSFDecodeError: If "gbhd" block contains an unknown child block
+        """
+        meta = {
+            "grain": {
+            }
+        }
+
+        meta['grain']['source_id'] = await gbhd_block.read_uuid()
+        meta['grain']['flow_id'] = await gbhd_block.read_uuid()
+        await self.file_data.seek(16, 1)  # Skip over deprecated byte array
+        meta['grain']['origin_timestamp'] = await gbhd_block.read_ippts()
+        meta['grain']['sync_timestamp'] = await gbhd_block.read_ippts()
+        meta['grain']['rate'] = await gbhd_block.read_rational()
+        meta['grain']['duration'] = await gbhd_block.read_rational()
+
+        async for gbhd_child in gbhd_block.child_blocks():
+            if gbhd_child.tag == "tils":
+                meta['grain']['timelabels'] = await self._decode_tils(gbhd_child)
+            elif gbhd_child.tag == "vghd":
+                meta['grain']['grain_type'] = 'video'
+                
meta['grain']['cog_frame'] = {} + meta['grain']['cog_frame']['format'] = await gbhd_child.read_uint(4) + meta['grain']['cog_frame']['layout'] = await gbhd_child.read_uint(4) + meta['grain']['cog_frame']['width'] = await gbhd_child.read_uint(4) + meta['grain']['cog_frame']['height'] = await gbhd_child.read_uint(4) + meta['grain']['cog_frame']['extension'] = await gbhd_child.read_uint(4) + + src_aspect_ratio = await gbhd_child.read_rational() + if src_aspect_ratio != 0: + meta['grain']['cog_frame']['source_aspect_ratio'] = { + 'numerator': src_aspect_ratio.numerator, + 'denominator': src_aspect_ratio.denominator + } + + pixel_aspect_ratio = await gbhd_child.read_rational() + if pixel_aspect_ratio != 0: + meta['grain']['cog_frame']['pixel_aspect_ratio'] = { + 'numerator': pixel_aspect_ratio.numerator, + 'denominator': pixel_aspect_ratio.denominator + } + + meta['grain']['cog_frame']['components'] = [] + if await gbhd_child.has_child_block(): + async with AsyncGSFBlock(self.file_data) as comp_block: + if comp_block.tag != "comp": + continue # Skip unknown/unexpected block + + comp_count = await comp_block.read_uint(2) + offset = 0 + for i in range(0, comp_count): + comp = {} + comp['width'] = await comp_block.read_uint(4) + comp['height'] = await comp_block.read_uint(4) + comp['stride'] = await comp_block.read_uint(4) + comp['length'] = await comp_block.read_uint(4) + comp['offset'] = offset + offset += comp['length'] + meta['grain']['cog_frame']['components'].append(comp) + + elif gbhd_child.tag == 'cghd': + meta['grain']['grain_type'] = "coded_video" + meta['grain']['cog_coded_frame'] = {} + meta['grain']['cog_coded_frame']['format'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['layout'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['origin_width'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['origin_height'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['coded_width'] = await 
gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['coded_height'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_frame']['is_key_frame'] = await gbhd_child.read_bool() + meta['grain']['cog_coded_frame']['temporal_offset'] = await gbhd_child.read_sint(4) + + if await gbhd_child.has_child_block(): + async with AsyncGSFBlock(self.file_data) as unof_block: + meta['grain']['cog_coded_frame']['unit_offsets'] = [] + + unit_offsets = await unof_block.read_uint(2) + for i in range(0, unit_offsets): + meta['grain']['cog_coded_frame']['unit_offsets'].append(await unof_block.read_uint(4)) + + elif gbhd_child.tag == "aghd": + meta['grain']['grain_type'] = "audio" + meta['grain']['cog_audio'] = {} + meta['grain']['cog_audio']['format'] = await gbhd_child.read_uint(4) + meta['grain']['cog_audio']['channels'] = await gbhd_child.read_uint(2) + meta['grain']['cog_audio']['samples'] = await gbhd_child.read_uint(4) + meta['grain']['cog_audio']['sample_rate'] = await gbhd_child.read_uint(4) + + elif gbhd_child.tag == "cahd": + meta['grain']['grain_type'] = "coded_audio" + meta['grain']['cog_coded_audio'] = {} + meta['grain']['cog_coded_audio']['format'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_audio']['channels'] = await gbhd_child.read_uint(2) + meta['grain']['cog_coded_audio']['samples'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_audio']['priming'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_audio']['remainder'] = await gbhd_child.read_uint(4) + meta['grain']['cog_coded_audio']['sample_rate'] = await gbhd_child.read_uint(4) + + elif gbhd_child.tag == "eghd": + meta['grain']['grain_type'] = "event" + else: + raise GSFDecodeError( + "Unknown type {} at offset {}".format(gbhd_child.tag, gbhd_child.block_start), + gbhd_child.block_start, + length=gbhd_child.size + ) + + return meta + + async def decode_file_headers(self): + """Verify the file is a supported version, and get the file header + + :returns: File header 
data (segments and tags) as a dict + :raises GSFDecodeBadVersionError: If the file version is not supported + :raises GSFDecodeBadFileTypeError: If this isn't a GSF file + :raises GSFDecodeError: If the file doesn't have a "head" block + """ + if self.head is not None: + return self.head + + (major, minor) = await self._decode_ssb_header() + if (major, minor) != (7, 0): + raise GSFDecodeBadVersionError("Unknown Version {}.{}".format(major, minor), 0, major, minor) + + try: + async with AsyncGSFBlock(self.file_data, want_tag="head") as head_block: + self.head = await self._decode_head(head_block) + return self.head + except EOFError: + raise GSFDecodeError("No head block found in file", await self.file_data.tell()) + + async def __aenter__(self): + if self.start_loc is None: + self.start_loc = await self.file_data.tell() + await self.decode_file_headers() + return self + + async def __aexit__(self, *args, **kwargs): + if self.start_loc is not None: + await self.file_data.seek(self.start_loc) + self.start_loc = None + + def __aiter__(self): + return self.grains() + + async def grains(self, local_ids=None, load_lazily=True): + """Asynchronous generator to get grains from the GSF file. Skips blocks which aren't "grai". + + The file_data will be positioned after the `grai` block. + + :param local_ids: A list of local-ids to include in the output. If None (the default) then all local-ids will be + included + :param skip_data: If True, grain data blocks will be seeked over and only grain headers will be read + :param load_lazily: If True, the grains returned will be designed to lazily load data from the underlying stream + only when it is needed. These grain data elements will have an extra 'load' coroutine for + triggering this load, and accessing data in their data element without first awaiting this + coroutine will raise an exception. + :yields: (Grain, local_id) tuple for each grain + :raises GSFDecodeError: If grain is invalid (e.g. 
no "gbhd" child) + """ + await self.decode_file_headers() + + while True: + try: + async with AsyncGSFBlock(self.file_data, want_tag="grai") as grai_block: + if grai_block.size == 0: + return # Terminator block reached + + local_id = await grai_block.read_uint(2) + + if local_ids is not None and local_id not in local_ids: + continue + + async with AsyncGSFBlock(self.file_data, want_tag="gbhd", raise_on_wrong_tag=True) as gbhd_block: + meta = await self._decode_gbhd(gbhd_block) + + data = None + + if await grai_block.has_child_block(): + async with AsyncGSFBlock(self.file_data, want_tag="grdt") as grdt_block: + if await grdt_block.get_remaining() > 0: + if load_lazily: + data = AsyncIOBytes(self.file_data, + await self.file_data.tell(), + await grdt_block.get_remaining()) + else: + data = await self.file_data.read(await grdt_block.get_remaining()) + + yield (await self.Grain(meta, data), local_id) + except EOFError: + return # We ran out of grains to read and hit EOF + + async def decode(self, load_lazily=False): + """Decode a GSF formatted bytes object + + :param load_lazily: If True, the grains returned will be designed to lazily load data from the underlying stream + only when it is needed. These grain data elements will have an extra 'load' coroutine for + triggering this load, and accessing data in their data element without first awaiting this + coroutine will raise an exception. + :returns: A dictionary mapping sequence ids to lists of GRAIN objects (or subclasses of such). 
+ """ + segments = {} + async with self: + async for (grain, local_id) in self.grains(load_lazily=load_lazily): + if local_id not in segments: + segments[local_id] = [] + segments[local_id].append(grain) + + return (self.head, segments) diff --git a/mediagrains_py36/asyncio/aiobytes.py b/mediagrains_py36/asyncio/aiobytes.py new file mode 100644 index 0000000..8d1c237 --- /dev/null +++ b/mediagrains_py36/asyncio/aiobytes.py @@ -0,0 +1,124 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""\ +A simple wrapper class AsyncIOBytes which is an asynchronous version of IOBytes +""" + +from collections.abc import Sequence + + +__all__ = ["AsyncIOBytes"] + + +class AsyncLazyLoaderUnloadedError (Exception): + pass + + +class AsyncLazyLoader (object): + """An object that can be loaded asynchronously as needed. + + In most cases this class should be subclassed to make actually useful classes, but technically it's not + an abstract base class because it *can* be used directly if needed. + + The constructor takes a coroutine taking no parameters which returns an object as its only parameter. + + Unlike the synchronous version loading is not automatic, but can be triggered by awaiting the load coroutine. 
+ """ + + _attributes = [] + + def __init__(self, loader): + """ + :param loader: a coroutine taking no parameters which returns an object + """ + self._object = None + self._loader = loader + + def __getattribute__(self, attr): + if attr in (['_object', '_loader', '__repr__', 'load'] + type(self)._attributes): + return object.__getattribute__(self, attr) + else: + if object.__getattribute__(self, '_object') is None: + raise AsyncLazyLoaderUnloadedError( + "A call to {} was made on an object that hasn't been loaded".format(attr) + ) + return getattr(object.__getattribute__(self, '_object'), attr) + + def __repr__(self): + if object.__getattribute__(self, '_object') is None: + return object.__repr__(self) + else: + return repr(object.__getattribute__(self, '_object')) + + def __setattr__(self, attr, value): + if attr in ['_object', '_loader'] + type(self)._attributes: + return object.__setattr__(self, attr, value) + else: + if object.__getattribute__(self, '_object') is None: + raise AsyncLazyLoaderUnloadedError( + "A call to set {} was made on an object that hasn't been loaded".format(attr) + ) + return setattr(object.__getattribute__(self, '_object'), attr, value) + + async def load(self): + """Await this coroutine to load the actual object""" + _loader = object.__getattribute__(self, "_loader") + object.__setattr__(self, "_object", await _loader()) + + +class AsyncIOBytes (AsyncLazyLoader, Sequence): + """A Bytes-like object that is backed by a seekable Asynchronous IO stream and can be loaded asynchronously by + awaiting its load coroutine. 
+ """ + + _attributes = ['_istream', '_start', '_length', '__len__'] + + def __init__(self, istream, start, length): + """ + :param istream: An instance of an asynchronous seekable readable + :param start: The value to pass to istream.seek to get to the start of this data + :param start: The length of the data + """ + async def __loadbytes(): + loc = await self._istream.tell() + try: + await self._istream.seek(self._start) + _bytes = await self._istream.read(self._length) + finally: + await self._istream.seek(loc) + return _bytes + + AsyncLazyLoader.__init__(self, __loadbytes) + self._istream = istream + self._start = start + self._length = length + + def __len__(self): + if self._object is None: + return self._length + else: + return len(self._object) + + def __repr__(self): + if self._object is None: + return "AsyncIOBytes({!r}, {!r}, {!r})".format(self._istream, self._start, self._length) + else: + return repr(self._object) + + def __getitem__(self, *args, **kwargs): + return self.__getattribute__('__getitem__')(*args, **kwargs) diff --git a/mediagrains_py36/asyncio/bytesaio.py b/mediagrains_py36/asyncio/bytesaio.py new file mode 100644 index 0000000..7786e8e --- /dev/null +++ b/mediagrains_py36/asyncio/bytesaio.py @@ -0,0 +1,76 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +"""\ +A simple wrapper class BytesAIO which is an asynchronous version of BytesIO +""" + +from io import BytesIO + + +def asynchronise(f): + async def __inner(*args, **kwargs): + return f(*args, **kwargs) + return __inner + + +class BytesAIO(object): + def __init__(self, b): + """Constructor + + :param s: A bytes object""" + self._bytesio = BytesIO(b) + + def __getattr__(self, attr): + if attr in ['getbuffer', + 'getvalue', + 'closed']: + return getattr(self._bytesio, attr) + elif attr in ['read1', + 'readinto1', + 'detach', + 'read', + 'readinto', + 'write', + 'close', + 'fileno', + 'flush', + 'isatty', + 'readable', + 'readline', + 'readlines', + 'seek', + 'seekable', + 'tell', + 'truncate', + 'writeable', + 'writelines']: + return asynchronise(getattr(self._bytesio, attr)) + else: + raise AttributeError + + async def __aenter__(self): + return self._bytesio.__enter__() + + async def __aexit__(self, *args, **kwargs): + return self._bytesio.__exit__(*args, **kwargs) + + def __aiter__(self): + return self + + def __anext__(self): + return next(self._bytesio) diff --git a/mediagrains_py36/numpy/__init__.py b/mediagrains_py36/numpy/__init__.py new file mode 100644 index 0000000..662a315 --- /dev/null +++ b/mediagrains_py36/numpy/__init__.py @@ -0,0 +1,25 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +"""\ +Library for handling mediagrains in numpy arrays +""" + +from .videograin import VIDEOGRAIN, VideoGrain +from . import convert + +__all__ = ['VideoGrain', 'VIDEOGRAIN'] diff --git a/mediagrains_py36/numpy/convert.py b/mediagrains_py36/numpy/convert.py new file mode 100644 index 0000000..d0dea23 --- /dev/null +++ b/mediagrains_py36/numpy/convert.py @@ -0,0 +1,365 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""\ +Library for converting video grain formats represented as numpy arrays. 
+""" + +from mediagrains.cogenums import CogFrameFormat, CogFrameLayout, COG_FRAME_FORMAT_ACTIVE_BITS, COG_PLANAR_FORMAT, PlanarChromaFormat +from typing import Callable, List +from uuid import uuid5, UUID +import numpy as np +import numpy.random as npr + +from pdb import set_trace + +from .videograin import VideoGrain, VIDEOGRAIN + + +def distinct_pairs_from(vals): + for i in range(0, len(vals)): + for j in range(i + 1, len(vals)): + yield (vals[i], vals[j]) + + +def compose(first: Callable[[VIDEOGRAIN, VIDEOGRAIN], None], intermediate: CogFrameFormat, second: Callable[[VIDEOGRAIN, VIDEOGRAIN], None]) -> Callable[[VIDEOGRAIN, VIDEOGRAIN], None]: + """Compose two conversion functions together""" + def _inner(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_intermediate = grain_in._similar_grain(intermediate) + + first(grain_in, grain_intermediate) + second(grain_intermediate, grain_out) + return _inner + + +# Some simple conversions can be acheived by just copying the data from one grain to the other with no +# clever work at all. 
All the cleverness is already present in the code that creates the component array views +# in the mediagrains +def _simple_copy_convert_yuv(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + grain_out.component_data.U[:,:] = grain_in.component_data.U + grain_out.component_data.V[:,:] = grain_in.component_data.V + + +def _simple_copy_convert_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.R[:,:] = grain_in.component_data.R + grain_out.component_data.G[:,:] = grain_in.component_data.G + grain_out.component_data.B[:,:] = grain_in.component_data.B + + +def _int_array_mean(a: np.ndarray, b: np.ndarray) -> np.ndarray: + """This takes the mean of two arrays of integers without risking overflowing intermediate values.""" + return (a//2 + b//2) + ((a&0x1) | (b&0x1)) + + +# Some conversions between YUV colour subsampling systems require a simple mean +def _simple_mean_convert_yuv444__yuv422(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + grain_out.component_data.U[:,:] = _int_array_mean(grain_in.component_data.U[0::2, :], grain_in.component_data.U[1::2, :]) + grain_out.component_data.V[:,:] = _int_array_mean(grain_in.component_data.V[0::2, :], grain_in.component_data.V[1::2, :]) + + +def _simple_mean_convert_yuv422__yuv420(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + grain_out.component_data.U[:,:] = _int_array_mean(grain_in.component_data.U[:, 0::2], grain_in.component_data.U[:, 1::2]) + grain_out.component_data.V[:,:] = _int_array_mean(grain_in.component_data.V[:, 0::2], grain_in.component_data.V[:, 1::2]) + + +# Other conversions require duplicating samples +def _simple_duplicate_convert_yuv422__yuv444(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + + grain_out.component_data.U[0::2, :] = 
grain_in.component_data.U + grain_out.component_data.U[1::2, :] = grain_in.component_data.U + grain_out.component_data.V[0::2, :] = grain_in.component_data.V + grain_out.component_data.V[1::2, :] = grain_in.component_data.V + + +def _simple_duplicate_convert_yuv420__yuv422(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + grain_out.component_data.Y[:,:] = grain_in.component_data.Y + + grain_out.component_data.U[:, 0::2] = grain_in.component_data.U + grain_out.component_data.U[:, 1::2] = grain_in.component_data.U + grain_out.component_data.V[:, 0::2] = grain_in.component_data.V + grain_out.component_data.V[:, 1::2] = grain_in.component_data.V + + +# Bit depth conversions +def _unbiased_right_shift(a: np.ndarray, n: int) -> np.ndarray: + return (a >> n) + ((a >> (n - 1))&0x1) + +def _bitdepth_down_convert_yuv(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) + + grain_out.component_data[0][:] = _unbiased_right_shift(grain_in.component_data[0][:], bitshift) + grain_out.component_data[1][:] = _unbiased_right_shift(grain_in.component_data[1][:], bitshift) + grain_out.component_data[2][:] = _unbiased_right_shift(grain_in.component_data[2][:], bitshift) + +def _bitdepth_down_convert_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) + + grain_out.component_data.R[:] = _unbiased_right_shift(grain_in.component_data.R[:], bitshift) + grain_out.component_data.G[:] = _unbiased_right_shift(grain_in.component_data.G[:], bitshift) + grain_out.component_data.B[:] = _unbiased_right_shift(grain_in.component_data.B[:], bitshift) + + +def _noisy_left_shift(a: np.ndarray, n: int) -> np.ndarray: + rando = ((npr.random_sample(a.shape) * (1 << n)).astype(a.dtype)) & ((1 << n) - 1) + return (a << n) + rando + +def _bitdepth_up_convert_yuv(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): 
+ bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) + + dt = grain_out.component_data[0].dtype + + grain_out.component_data[0][:] = _noisy_left_shift(grain_in.component_data[0][:].astype(dt), bitshift) + grain_out.component_data[1][:] = _noisy_left_shift(grain_in.component_data[1][:].astype(dt), bitshift) + grain_out.component_data[2][:] = _noisy_left_shift(grain_in.component_data[2][:].astype(dt), bitshift) + +def _bitdepth_up_convert_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bitshift = COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) - COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) + + dt = grain_out.component_data[0].dtype + + grain_out.component_data.R[:] = _noisy_left_shift(grain_in.component_data.R[:].astype(dt), bitshift) + grain_out.component_data.G[:] = _noisy_left_shift(grain_in.component_data.G[:].astype(dt), bitshift) + grain_out.component_data.B[:] = _noisy_left_shift(grain_in.component_data.B[:].astype(dt), bitshift) + + +# Colourspace conversions (based on rec.709) +def _convert_rgb_to_yuv444(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bd = COG_FRAME_FORMAT_ACTIVE_BITS(grain_out.format) + (R, G, B) = (grain_in.component_data.R, + grain_in.component_data.G, + grain_in.component_data.B) + + np.clip((R*0.2126 + G*0.7152 + B*0.0722), 0, 1 << bd, out=grain_out.component_data.Y, casting="unsafe") + np.clip((R*-0.114572 - G*0.385428 + B*0.5 + (1 << (bd - 1))), 0, 1 << bd, out=grain_out.component_data.U, casting="unsafe") + np.clip((R*0.5 - G*0.454153 - B*0.045847 + (1 << (bd - 1))), 0, 1 << bd, out=grain_out.component_data.V, casting="unsafe") + + +def _convert_yuv444_to_rgb(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + bd = COG_FRAME_FORMAT_ACTIVE_BITS(grain_in.format) + (Y, U, V) = (grain_in.component_data.Y.astype(np.dtype(np.double)), + grain_in.component_data.U.astype(np.dtype(np.double)) - (1 << (bd - 1)), + grain_in.component_data.V.astype(np.dtype(np.double)) - (1 << (bd - 1))) + + 
np.clip((Y + V*1.5748), 0, 1 << bd, out=grain_out.component_data.R, casting="unsafe") + np.clip((Y - U*0.187324 - V*0.468124), 0, 1 << bd, out=grain_out.component_data.G, casting="unsafe") + np.clip((Y + U*1.8556), 0, 1 << bd, out=grain_out.component_data.B, casting="unsafe") + + +def _convert_v210_to_yuv422_10bit(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + # This is a v210 -> planar descramble. It's not super fast, but it should be correct + # + # Input data is array of 32-bit words, arranged as a 1d array in repeating blocks of 4 like: + # lsb -> ->msb + # | U0 | Y0 | V0 |X| + # | Y1 | U1 | Y2 |X| + # | V1 | Y3 | U2 |X| + # | Y4 | V2 | Y5 |X| + # ... + + # Our first descramble simple creates arrays containing the first, second, or third sample from each dword: + # first = [U0] [Y1] [V1] [Y4] ... + # second = [Y0] [U1] [Y3] [V2] ... + # third = [V0] [Y2] [U2] [Y5] ... + + first = (grain_in.data & 0x3FF).astype(np.dtype(np.uint16)) + second = ((grain_in.data >> 10) & 0x3FF).astype(np.dtype(np.uint16)) + third = ((grain_in.data >> 20) & 0x3FF).astype(np.dtype(np.uint16)) + + # These arrays are still linear 1d arrays so we reinterpret them as 2d arrays, remembering that v210 has an alignment of 48 pixels horizontally + first.shape = (grain_in.height, 32*((grain_in.width + 47)//48)) + second.shape = (grain_in.height, 32*((grain_in.width + 47)//48)) + third.shape = (grain_in.height, 32*((grain_in.width + 47)//48)) + + # Our usual transpose to make the arrays more convenient + first = first.transpose() + second = second.transpose() + third = third.transpose() + + # Finally we can assign every third entry in the target component_data arrays with every second entry from one of the three intermediate arrays: + # eg: + # Y = [Y0] [ ] [ ] [Y3] [ ] [ ] ... + # Y = [Y0] [Y1] [ ] [Y3] [Y4] [ ] ... + # Y = [Y0] [Y1] [Y2] [Y3] [Y4] [Y5] ... 
+ + grain_out.component_data.Y[0::3, :] = second[0::2, :][0:(grain_in.width + 2)//3, :] + grain_out.component_data.Y[1::3, :] = first[1::2, :][0:(grain_in.width + 1)//3, :] + grain_out.component_data.Y[2::3, :] = third[1::2, :][0:(grain_in.width + 0)//3, :] + + # And similarly for the chroma: + # U = [U0] [ ] [ ] ... + # U = [U0] [U1] [ ] ... + # U = [U0] [U1] [U2] ... + + grain_out.component_data.U[0::3, :] = first[0::4, :][0:(grain_in.width//2 + 2)//3, :] + grain_out.component_data.U[1::3, :] = second[1::4, :][0:(grain_in.width//2 + 1)//3, :] + grain_out.component_data.U[2::3, :] = third[2::4, :][0:(grain_in.width//2 + 0)//3, :] + + # And similarly for the chroma: + # V = [V0] [ ] [ ] ... + # V = [V0] [V1] [ ] ... + # V = [V0] [V1] [V2] ... + + grain_out.component_data.V[0::3, :] = third[0::4, :][0:(grain_in.width//2 + 2)//3, :] + grain_out.component_data.V[1::3, :] = first[2::4, :][0:(grain_in.width//2 + 1)//3, :] + grain_out.component_data.V[2::3, :] = second[3::4, :][0:(grain_in.width//2 + 0)//3, :] + + +def _convert_yuv422_10bit_to_v210(grain_in: VIDEOGRAIN, grain_out: VIDEOGRAIN): + # This won't be fast, but it should work. 
+ + # Take every third entry in each component and arrange them + first = np.zeros((grain_in.height, 32*((grain_in.width + 47)//48)), dtype=np.dtype(np.uint32)).transpose() + second = np.zeros((grain_in.height, 32*((grain_in.width + 47)//48)), dtype=np.dtype(np.uint32)).transpose() + third = np.zeros((grain_in.height, 32*((grain_in.width + 47)//48)), dtype=np.dtype(np.uint32)).transpose() + + first[0::4, :][0:(grain_in.width//2 + 2)//3, :] = grain_in.component_data.U[0::3, :] + first[1::2, :][0:(grain_in.width + 1)//3, :] = grain_in.component_data.Y[1::3, :] + first[2::4, :][0:(grain_in.width//2 + 1)//3, :] = grain_in.component_data.V[1::3, :] + + second[0::2, :][0:(grain_in.width + 2)//3, :] = grain_in.component_data.Y[0::3, :] + second[1::4, :][0:(grain_in.width//2 + 1)//3, :] = grain_in.component_data.U[1::3, :] + second[3::4, :][0:(grain_in.width//2 + 0)//3, :] = grain_in.component_data.V[2::3, :] + + third[0::4, :][0:(grain_in.width//2 + 2)//3, :] = grain_in.component_data.V[0::3, :] + third[1::2, :][0:(grain_in.width + 0)//3, :] = grain_in.component_data.Y[2::3, :] + third[2::4, :][0:(grain_in.width//2 + 0)//3, :] = grain_in.component_data.U[2::3, :] + + # Now combine them to make the dwords expected + grain_out.data[:] = np.ravel(first.transpose()) + (np.ravel(second.transpose()) << 10) + (np.ravel(third.transpose()) << 20) + + +# These methods automate the process of registering simple copy conversions +def _register_simple_copy_conversions_for_formats_yuv(fmts: List[CogFrameFormat]): + for i in range(0, len(fmts)): + for j in range(i+1, len(fmts)): + VIDEOGRAIN.grain_conversion(fmts[i], fmts[j])(_simple_copy_convert_yuv) + VIDEOGRAIN.grain_conversion(fmts[j], fmts[i])(_simple_copy_convert_yuv) + +def _register_simple_copy_conversions_for_formats_rgb(fmts: List[CogFrameFormat]): + for i in range(0, len(fmts)): + for j in range(i+1, len(fmts)): + VIDEOGRAIN.grain_conversion(fmts[i], fmts[j])(_simple_copy_convert_rgb) + VIDEOGRAIN.grain_conversion(fmts[j], 
fmts[i])(_simple_copy_convert_rgb) + + +def _equivalent_formats(fmt: CogFrameFormat) -> List[CogFrameFormat]: + equiv_categories = [ + (CogFrameFormat.U8_422, CogFrameFormat.UYVY, CogFrameFormat.YUYV), + (CogFrameFormat.S16_422, CogFrameFormat.v216), + (CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR)] + + for cat in equiv_categories: + if fmt in cat: + return cat + return (fmt,) + + +_register_simple_copy_conversions_for_formats_yuv(_equivalent_formats(CogFrameFormat.U8_422)) +_register_simple_copy_conversions_for_formats_yuv(_equivalent_formats(CogFrameFormat.S16_422)) +_register_simple_copy_conversions_for_formats_rgb(_equivalent_formats(CogFrameFormat.U8_444_RGB)) + +# 8 and 16 bit YUV colour subsampling conversions +for bd in [8, 10, 12, 16, 32]: + for fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd)): + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd), fmt)(_simple_mean_convert_yuv444__yuv422) + VIDEOGRAIN.grain_conversion(fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd))(_simple_mean_convert_yuv422__yuv420) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd), fmt)(_simple_duplicate_convert_yuv420__yuv422) + VIDEOGRAIN.grain_conversion(fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd))(_simple_duplicate_convert_yuv422__yuv444) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd))(compose(_simple_mean_convert_yuv444__yuv422, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd), _simple_mean_convert_yuv422__yuv420)) + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_420, bd), COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, bd))(compose(_simple_duplicate_convert_yuv420__yuv422, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_422, bd), _simple_duplicate_convert_yuv422__yuv444)) + + +# Bit depth 
conversions +for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): + for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444]: + for fmt1 in _equivalent_formats(COG_PLANAR_FORMAT(ss, d1)): + for fmt2 in _equivalent_formats(COG_PLANAR_FORMAT(ss, d2)): + VIDEOGRAIN.grain_conversion(fmt2, fmt1)(_bitdepth_down_convert_yuv) + VIDEOGRAIN.grain_conversion(fmt1, fmt2)(_bitdepth_up_convert_yuv) + for ss in [PlanarChromaFormat.RGB]: + for fmt1 in _equivalent_formats(COG_PLANAR_FORMAT(ss, d1)): + for fmt2 in _equivalent_formats(COG_PLANAR_FORMAT(ss, d2)): + VIDEOGRAIN.grain_conversion(fmt2, fmt1)(_bitdepth_down_convert_rgb) + VIDEOGRAIN.grain_conversion(fmt1, fmt2)(_bitdepth_up_convert_rgb) + + +# Colourspace conversion +for d in [8, 10, 12, 16, 32]: + for fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d)): + VIDEOGRAIN.grain_conversion(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), fmt)(_convert_yuv444_to_rgb) + VIDEOGRAIN.grain_conversion(fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d))(_convert_rgb_to_yuv444) + + +# V210 <-> 10 bit 4:2:2 YUV +VIDEOGRAIN.grain_conversion(CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT)(_convert_v210_to_yuv422_10bit) +VIDEOGRAIN.grain_conversion(CogFrameFormat.S16_422_10BIT, CogFrameFormat.v210)(_convert_yuv422_10bit_to_v210) + +# We have a number of transformations that aren't supported directly, but are via an intermediate format +# Bit depth and chroma combination conversions +for (ss1, ss2) in distinct_pairs_from([PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444]): + for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): + for fmt11 in _equivalent_formats(COG_PLANAR_FORMAT(ss1, d1)): + for fmt22 in _equivalent_formats(COG_PLANAR_FORMAT(ss2, d2)): + VIDEOGRAIN.grain_conversion_two_step(fmt11, COG_PLANAR_FORMAT(ss2, d1), fmt22) + VIDEOGRAIN.grain_conversion_two_step(fmt22, COG_PLANAR_FORMAT(ss1, d2), fmt11) + for fmt12 in 
_equivalent_formats(COG_PLANAR_FORMAT(ss1, d2)): + for fmt21 in _equivalent_formats(COG_PLANAR_FORMAT(ss2, d1)): + VIDEOGRAIN.grain_conversion_two_step(fmt12, COG_PLANAR_FORMAT(ss2, d2), fmt21) + VIDEOGRAIN.grain_conversion_two_step(fmt21, COG_PLANAR_FORMAT(ss2, d2), fmt12) + +# RGB and non-444 YUV at same bit-depth +for d in [8, 10, 12, 16, 32]: + for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d)): + for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422]: + for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(ss, d)): + VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), yuv_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d), rgb_fmt) + +# RGB and YUV-444 with bit-depth conversion +for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): + for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d1)): + for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2)): + VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1), yuv_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2), rgb_fmt) + for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2)): + for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1)): + VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), yuv_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d1), rgb_fmt) + +# RGB to YUV with bit-depth and colour subsampling conversion +for (d1, d2) in distinct_pairs_from([8, 10, 12, 16, 32]): + for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422]: + for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d1)): + for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(ss, 
d2)): + VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1), yuv_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d1), rgb_fmt) + for rgb_fmt in _equivalent_formats(COG_PLANAR_FORMAT(PlanarChromaFormat.RGB, d2)): + for yuv_fmt in _equivalent_formats(COG_PLANAR_FORMAT(ss, d1)): + VIDEOGRAIN.grain_conversion_two_step(rgb_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), yuv_fmt) + VIDEOGRAIN.grain_conversion_two_step(yuv_fmt, COG_PLANAR_FORMAT(PlanarChromaFormat.YUV_444, d2), rgb_fmt) + +# Conversions from v210 to other formats +for d in [8, 10, 12, 16, 32]: + for ss in [PlanarChromaFormat.YUV_420, PlanarChromaFormat.YUV_422, PlanarChromaFormat.YUV_444, PlanarChromaFormat.RGB]: + for fmt in _equivalent_formats(COG_PLANAR_FORMAT(ss, d)): + if fmt != CogFrameFormat.S16_422_10BIT: + VIDEOGRAIN.grain_conversion_two_step(CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT, fmt) + VIDEOGRAIN.grain_conversion_two_step(fmt, CogFrameFormat.S16_422_10BIT, CogFrameFormat.v210) \ No newline at end of file diff --git a/mediagrains_py36/numpy/videograin.py b/mediagrains_py36/numpy/videograin.py new file mode 100644 index 0000000..5154ce3 --- /dev/null +++ b/mediagrains_py36/numpy/videograin.py @@ -0,0 +1,305 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +"""\ +Library for handling mediagrains in numpy arrays +""" + +from mediagrains.cogenums import ( + CogFrameFormat, + COG_FRAME_IS_PACKED, + COG_FRAME_IS_COMPRESSED, + COG_FRAME_IS_PLANAR, + COG_FRAME_FORMAT_BYTES_PER_VALUE, + COG_FRAME_IS_PLANAR_RGB) +from mediagrains import grain as bytesgrain +from mediagrains import grain_constructors as bytesgrain_constructors +from copy import copy, deepcopy +import uuid + +import numpy as np +from numpy.lib.stride_tricks import as_strided + +from typing import Callable + +from enum import Enum, auto + + +__all__ = ['VideoGrain', 'VIDEOGRAIN'] + + +def _dtype_from_cogframeformat(fmt: CogFrameFormat) -> np.dtype: + """This method returns the numpy "data type" for a particular video format. + + For planar and padded formats this is the size of the native integer type that is used to handle the samples (eg. 8bit, 16bit, etc ...) + For weird packed formats like v210 (10-bit samples packed so that there are 3 10-bit samples in every 32-bit word) this is not possible. + Instead for v210 we return uint32, since that is the most useful native data type that always corresponds to an integral number of samples. 
+ """ + if COG_FRAME_IS_PLANAR(fmt): + if COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 1: + return np.dtype(np.uint8) + elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 2: + return np.dtype(np.uint16) + elif COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt) == 4: + return np.dtype(np.uint32) + elif fmt in [CogFrameFormat.UYVY, + CogFrameFormat.YUYV, + CogFrameFormat.AYUV, + CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + return np.dtype(np.uint8) + elif fmt == CogFrameFormat.v216: + return np.dtype(np.uint16) + elif fmt == CogFrameFormat.v210: + return np.dtype(np.uint32) + + raise NotImplementedError("Cog Frame Format not amongst those supported for numpy array interpretation") + + +class ComponentDataList(list): + class ComponentOrder (Enum): + YUV = auto() + RGB = auto() + BGR = auto() + X = auto() + + def __init__(self, data: list, arrangement: ComponentOrder=ComponentOrder.X): + super().__init__(data) + if arrangement == ComponentDataList.ComponentOrder.YUV: + self.Y = self[0] + self.U = self[1] + self.V = self[2] + elif arrangement == ComponentDataList.ComponentOrder.RGB: + self.R = self[0] + self.G = self[1] + self.B = self[2] + elif arrangement == ComponentDataList.ComponentOrder.BGR: + self.B = self[0] + self.G = self[1] + self.R = self[2] + + +def _component_arrangement_from_format(fmt: CogFrameFormat): + """This method returns the ordering of the components in the component data arrays that are used to represent a particular format. + + Note that for the likes of UYVY this will return YUV since the planes are represented in that order by the interface even though they + are interleved in the data. + + For formats where no meaningful component access can be provided (v210, compressed formats, etc ...) the value X is returned. 
+ """ + if COG_FRAME_IS_PLANAR(fmt): + if COG_FRAME_IS_PLANAR_RGB(fmt): + return ComponentDataList.ComponentOrder.RGB + else: + return ComponentDataList.ComponentOrder.YUV + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV, CogFrameFormat.v216, CogFrameFormat.AYUV]: + return ComponentDataList.ComponentOrder.YUV + elif fmt in [CogFrameFormat.RGB, CogFrameFormat.RGBA, CogFrameFormat.RGBx, CogFrameFormat.ARGB, CogFrameFormat.xRGB]: + return ComponentDataList.ComponentOrder.RGB + elif fmt in [CogFrameFormat.BGRA, CogFrameFormat.BGRx, CogFrameFormat.xBGR, CogFrameFormat.ABGR]: + return ComponentDataList.ComponentOrder.BGR + else: + return ComponentDataList.ComponentOrder.X + + +def _component_arrays_for_interleaved_422(data0: np.ndarray, data1: np.ndarray, data2: np.ndarray, width: int, height: int, stride: int, itemsize: int): + return [ + as_strided(data0, + shape=(height, width), + strides=(stride, itemsize*2)).transpose(), + as_strided(data1, + shape=(height, width//2), + strides=(stride, itemsize*4)).transpose(), + as_strided(data2, + shape=(height, width//2), + strides=(stride, itemsize*4)).transpose()] + + +def _component_arrays_for_interleaved_444_take_three(data0: np.ndarray, data1: np.ndarray, data2: np.ndarray, width: int, height: int, stride: int, itemsize: int, num_components: int = 3): + return [ + as_strided(data0, + shape=(height, width), + strides=(stride, itemsize*num_components)).transpose(), + as_strided(data1, + shape=(height, width), + strides=(stride, itemsize*num_components)).transpose(), + as_strided(data2, + shape=(height, width), + strides=(stride, itemsize*num_components)).transpose()] + + +def _component_arrays_for_data_and_type(data: np.ndarray, fmt: CogFrameFormat, components: bytesgrain.VIDEOGRAIN.COMPONENT_LIST): + """This method returns a list of numpy array views which can be used to directly access the components of the video frame + without any need for conversion or copying. This is not possible for all formats. 
+ + For planar formats this simply returns a list of array views of the planes. + + For interleaved formats this returns a list of array views that use stride tricks to access alternate elements in the source data array. + + For weird packed formats like v210 nothing can be done, an empty list is returned since no individual component access is possible. + """ + if COG_FRAME_IS_PLANAR(fmt): + return [ + as_strided(data[component.offset//data.itemsize:(component.offset + component.length)//data.itemsize], + shape=(component.height, component.width), + strides=(component.stride, data.itemsize)).transpose() + for component in components] + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: + # Either 8 or 16 bits 4:2:2 interleavedd in UYVY order + return _component_arrays_for_interleaved_422(data[1:], data, data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize) + elif fmt == CogFrameFormat.YUYV: + # 8 bit 4:2:2 interleaved in YUYV order + return _component_arrays_for_interleaved_422(data, data[1:], data[3:], components[0].width, components[0].height, components[0].stride, data.itemsize) + elif fmt == CogFrameFormat.RGB: + # 8 bit 4:4:4 three components interleaved in RGB order + return _component_arrays_for_interleaved_444_take_three(data, data[1:], data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize) + elif fmt in [CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx]: + # 8 bit 4:4:4:4 four components interleave dropping the fourth component + return _component_arrays_for_interleaved_444_take_three(data, data[1:], data[2:], components[0].width, components[0].height, components[0].stride, data.itemsize, num_components=4) + elif fmt in [CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR, + CogFrameFormat.AYUV]: + # 8 bit 4:4:4:4 four components interleave dropping the first component + return 
class VIDEOGRAIN (bytesgrain.VIDEOGRAIN):
    """A video grain whose payload is exposed as a numpy array.

    Extends mediagrains.VIDEOGRAIN: the raw byte payload is reinterpreted as a
    numpy array (``self._data``) with the dtype implied by the grain's cog
    frame format, and per-component array views are exposed through
    ``self.component_data``.  Also provides a registry-based mechanism for
    converting grains between cog frame formats.
    """

    # Registry of conversion functions, keyed by (input format, output format).
    # Populated by the grain_conversion decorator (typically at module import).
    _grain_conversions = {}

    def __init__(self, meta, data):
        super().__init__(meta, data)
        # Reinterpret the underlying buffer using the dtype for this cog format
        self._data = np.frombuffer(self._data, dtype=_dtype_from_cogframeformat(self.format))
        self.component_data = ComponentDataList(
            _component_arrays_for_data_and_type(self._data, self.format, self.components),
            arrangement=_component_arrangement_from_format(self.format))

    def __array__(self):
        # Allows np.array(grain) to work directly
        return np.array(self.data)

    def __bytes__(self):
        return bytes(self.data)

    def __copy__(self):
        # Shallow copy shares the underlying data buffer
        return VideoGrain(copy(self.meta), self.data)

    def __deepcopy__(self, memo):
        # Deep copy duplicates the numpy data as well as the metadata
        return VideoGrain(deepcopy(self.meta), self.data.copy())

    def __repr__(self):
        if self.data is None:
            return "{}({!r})".format(self._factory, self.meta)
        else:
            return "{}({!r},< numpy data of length {} >)".format(self._factory, self.meta, len(self.data))

    @classmethod
    def grain_conversion(cls, fmt_in: CogFrameFormat, fmt_out: CogFrameFormat):
        """Decorator to apply to all grain conversion functions.

        Registers the decorated function as the conversion from fmt_in to
        fmt_out and returns it unchanged.
        """
        # BUGFIX: _inner is a pass-through decorator returning f, so its return
        # annotation is the callable type, not None (the original said -> None
        # while returning f).
        def _inner(f: Callable[["VIDEOGRAIN", "VIDEOGRAIN"], None]) -> Callable[["VIDEOGRAIN", "VIDEOGRAIN"], None]:
            cls._grain_conversions[(fmt_in, fmt_out)] = f
            return f
        return _inner

    @classmethod
    def grain_conversion_two_step(cls, fmt_in: CogFrameFormat, fmt_mid: CogFrameFormat, fmt_out: CogFrameFormat):
        """Register a grain conversion via an intermediate format, using existing conversions.

        Both the fmt_in->fmt_mid and fmt_mid->fmt_out conversions must already
        be registered (they are looked up when the composite conversion runs).
        """
        def _inner(grain_in: "VIDEOGRAIN", grain_out: "VIDEOGRAIN"):
            grain_mid = grain_in._similar_grain(fmt_mid)
            cls._get_grain_conversion_function(fmt_in, fmt_mid)(grain_in, grain_mid)
            cls._get_grain_conversion_function(fmt_mid, fmt_out)(grain_mid, grain_out)
        cls.grain_conversion(fmt_in, fmt_out)(_inner)

    @classmethod
    def _get_grain_conversion_function(cls, fmt_in: CogFrameFormat, fmt_out: CogFrameFormat) -> Callable[["VIDEOGRAIN", "VIDEOGRAIN"], None]:
        """Return the registered grain conversion function for a specified type conversion, or raise NotImplementedError."""
        if (fmt_in, fmt_out) in cls._grain_conversions:
            return cls._grain_conversions[(fmt_in, fmt_out)]

        raise NotImplementedError("This conversion has not yet been implemented")

    def flow_id_for_converted_flow(self, fmt: CogFrameFormat) -> uuid.UUID:
        """Deterministically derive the flow id for a format-converted copy of this flow (uuid5 of this flow id)."""
        return uuid.uuid5(self.flow_id, "FORMAT_CONVERSION: {!r}".format(fmt))

    def _similar_grain(self, fmt: CogFrameFormat) -> "VIDEOGRAIN":
        """Returns a new empty grain that has the specified format, but other parameters identical to this grain."""
        return VideoGrain(self.source_id,
                          self.flow_id_for_converted_flow(fmt),
                          origin_timestamp=self.origin_timestamp,
                          sync_timestamp=self.sync_timestamp,
                          cog_frame_format=fmt,
                          width=self.width,
                          height=self.height,
                          rate=self.rate,
                          duration=self.duration,
                          cog_frame_layout=self.layout)

    def convert(self, fmt: CogFrameFormat) -> "VIDEOGRAIN":
        """Used to convert this grain to a different cog format. Always produces a new grain.

        :param fmt: The format to convert to
        :returns: A new grain of the specified format. Notably converting to the same format is the same as a deepcopy
        :raises: NotImplementedError if the requested conversion is not possible
        """
        if self.format == fmt:
            return deepcopy(self)
        else:
            grain_out = self._similar_grain(fmt)
            self.__class__._get_grain_conversion_function(self.format, fmt)(self, grain_out)
            return grain_out

    def asformat(self, fmt: CogFrameFormat) -> "VIDEOGRAIN":
        """Used to ensure that this grain is in a particular format. Converts it if not.

        :param fmt: The format to ensure
        :returns: self or a new grain.
        :raises NotImplementedError: if the requested conversion is not possible.
        """
        if self.format == fmt:
            return self
        else:
            return self.convert(fmt)


def VideoGrain(*args, **kwargs) -> VIDEOGRAIN:
    """If the first argument is a mediagrains.VIDEOGRAIN then return a mediagrains.numpy.VIDEOGRAIN representing the same data.

    Otherwise takes the same parameters as mediagrains.VideoGrain and returns the same grain converted into a mediagrains.numpy.VIDEOGRAIN
    """
    if len(args) == 1 and isinstance(args[0], bytesgrain.VIDEOGRAIN):
        rawgrain = args[0]
    else:
        rawgrain = bytesgrain_constructors.VideoGrain(*args, **kwargs)

    return VIDEOGRAIN(rawgrain.meta, rawgrain.data)
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +import math +import numpy as np + +from mediagrains.cogenums import COG_FRAME_IS_COMPRESSED, COG_FRAME_FORMAT_ACTIVE_BITS +from mediagrains.numpy import VideoGrain as numpy_VideoGrain, VIDEOGRAIN as numpy_VIDEOGRAIN + +__all__ = ["compute_psnr"] + + +def _compute_comp_mse(data_a, data_b): + """Compute MSE (Mean Squared Error) for video component. + + :param data_a: Data for component a + :param data_b: Data for component b + :returns: The MSE value + """ + return np.mean(np.square(np.subtract(data_a, data_b))) + + +def _compute_comp_psnr(data_a, data_b, max_val): + """Compute PSNR for video component. + + :param data_a: Data for component a + :param data_b: Data for component b + :param max_val: Maximum value for a component pixel + :returns: The PSNR + """ + mse = _compute_comp_mse(data_a, data_b) + if mse == 0: + return float('Inf') + else: + return 10.0 * math.log10((max_val**2)/mse) + + +def compute_psnr(grain_a, grain_b): + """Compute PSNR for video grains. 
+ + :param grain_a: A VIDEOGRAIN + :param grain_b: A VIDEOGRAIN + :returns: A list of PSNR value for each video component + """ + if grain_a.grain_type != grain_b.grain_type or grain_a.grain_type != "video": + raise AttributeError("Invalid grain types") + if grain_a.width != grain_b.width or grain_a.height != grain_b.height: + raise AttributeError("Frame dimensions differ") + + if COG_FRAME_IS_COMPRESSED(grain_a.format): + raise NotImplementedError("Compressed video is not supported") + + if not isinstance(grain_a, numpy_VIDEOGRAIN): + grain_a = numpy_VideoGrain(grain_a) + if not isinstance(grain_b, numpy_VIDEOGRAIN): + grain_b = numpy_VideoGrain(grain_b) + + psnr = [] + max_val = (1 << COG_FRAME_FORMAT_ACTIVE_BITS(grain_a.format)) - 1 + for comp_data_a, comp_data_b in zip(grain_a.component_data, grain_b.component_data): + psnr.append(_compute_comp_psnr(comp_data_a, comp_data_b, max_val)) + + return psnr diff --git a/setup.py b/setup.py index 0e704cb..db72e49 100644 --- a/setup.py +++ b/setup.py @@ -16,46 +16,38 @@ # from __future__ import print_function -from setuptools import setup -import os - - -def is_package(path): - return ( - os.path.isdir(path) and - os.path.isfile(os.path.join(path, '__init__.py')) - ) - - -def find_packages(path, base=""): - """ Find all packages in path """ - packages = {} - for item in os.listdir(path): - dir = os.path.join(path, item) - if is_package(dir): - if base: - module_name = "%(base)s.%(item)s" % vars() - else: - module_name = item - packages[module_name] = dir - packages.update(find_packages(dir, module_name)) - return packages +from setuptools import setup +from sys import version_info -packages = find_packages(".") -package_names = packages.keys() +packages = { + 'mediagrains': 'mediagrains', + 'mediagrains.hypothesis': 'mediagrains/hypothesis', + 'mediagrains.comparison': 'mediagrains/comparison', + 'mediagrains.utils': 'mediagrains/utils' +} packages_required = [ "mediatimestamp >= 1.2.0", 'enum34 >= 
1.1.6;python_version<"3.4"', "six >= 1.10.0", "frozendict >= 1.2", + 'numpy >= 1.17.2;python_version>="3.6"', + 'numpy;python_version<"3.6"' ] deps_required = [] + +if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + packages['mediagrains_py36'] = 'mediagrains_py36' + packages['mediagrains_py36.asyncio'] = 'mediagrains_py36/asyncio' + + +package_names = list(packages.keys()) + setup(name="mediagrains", - version="2.5.3", + version="2.6.0", description="Simple utility for grain-based media", url='https://github.com/bbc/rd-apmm-python-lib-mediagrains', author='James Weaver', diff --git a/tests/test36_asyncio_gsf.py b/tests/test36_asyncio_gsf.py new file mode 100644 index 0000000..e48ac44 --- /dev/null +++ b/tests/test36_asyncio_gsf.py @@ -0,0 +1,563 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def async_test(f):
    """Decorator turning an async test method into a synchronous one.

    Runs the wrapped coroutine to completion on the event loop (with asyncio
    debug mode enabled), recording any warnings emitted while it runs and
    re-emitting them afterwards.  If the coroutine completed without raising,
    the test additionally asserts that no warnings were produced (asyncio
    warns about coroutines that were never awaited); otherwise the original
    exception is re-raised.
    """
    def __inner(*args, **kwargs):
        loop = asyncio.get_event_loop()
        loop.set_debug(True)
        caught = None
        warns = []

        try:
            with warnings.catch_warnings(record=True) as warns:
                loop.run_until_complete(f(*args, **kwargs))
        except Exception as e:
            # A single handler suffices here: the original code had a separate
            # "except AssertionError" clause that performed exactly the same
            # assignment, which was redundant (AssertionError is an Exception).
            caught = e

        for w in warns:
            warnings.showwarning(w.message,
                                 w.category,
                                 w.filename,
                                 w.lineno)
        if caught is None:
            # args[0] is the TestCase instance the decorated method is bound to
            args[0].assertEqual(len(warns), 0,
                                msg="asyncio subsystem generated warnings due to unawaited coroutines")
        else:
            raise caught

    return __inner
22)) + self.assertEqual(head['id'], UUID('163fd9b7-bef4-4d92-8488-31f3819be008')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('c6a3d3ff-74c0-446d-b59e-de1041f27e8a')) + + @async_test + async def test_generate_grains(self): + """Test that the generator yields each grain""" + async with aiofiles.open('examples/video.gsf', 'rb') as video_data_stream: + grain_count = 0 + async for (grain, local_id) in AsyncGSFDecoder(file_data=video_data_stream).grains(): + self.assertIsInstance(grain, VIDEOGRAIN) + self.assertEqual(grain.source_id, UUID('49578552-fb9e-4d3e-a197-3e3c437a895d')) + self.assertEqual(grain.flow_id, UUID('6e55f251-f75a-4d56-b3af-edb8b7993c3c')) + + grain_count += 1 + + self.assertEqual(10, grain_count) # There are 10 grains in the file + + @async_test + async def test_local_id_filtering(self): + async with aiofiles.open('examples/interleaved.gsf', 'rb') as interleaved_data_stream: + async with AsyncGSFDecoder(file_data=interleaved_data_stream) as UUT: + local_ids = set() + flow_ids = set() + async for (grain, local_id) in UUT.grains(): + local_ids.add(local_id) + flow_ids.add(grain.flow_id) + + self.assertEqual(local_ids, set([1, 2])) + self.assertEqual(flow_ids, set([UUID('28e4e09e-3517-11e9-8da2-5065f34ed007'), + UUID('2472f38e-3517-11e9-8da2-5065f34ed007')])) + + async with AsyncGSFDecoder(file_data=interleaved_data_stream) as UUT: + async for (grain, local_id) in UUT.grains(local_ids=[1]): + self.assertIsInstance(grain, AUDIOGRAIN) + self.assertEqual(grain.source_id, UUID('1f8fd27e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(grain.flow_id, UUID('28e4e09e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(local_id, 1) + + async with AsyncGSFDecoder(file_data=interleaved_data_stream) as UUT: + async for (grain, local_id) in UUT.grains(local_ids=[2]): + self.assertIsInstance(grain, VIDEOGRAIN) + self.assertEqual(grain.source_id, UUID('1f8fd27e-3517-11e9-8da2-5065f34ed007')) + 
self.assertEqual(grain.flow_id, UUID('2472f38e-3517-11e9-8da2-5065f34ed007')) + self.assertEqual(local_id, 2) + + @async_test + async def test_lazy_loading(self): + async with aiofiles.open('examples/video.gsf', 'rb') as video_data_stream: + grains = [grain async for (grain, local_id) in AsyncGSFDecoder(file_data=video_data_stream).grains()] + + with self.assertRaises(AsyncLazyLoaderUnloadedError): + grains[0].data[0] + + await grains[0].data.load() + + self.assertEqual(grains[0].data[0:1024], b"\x10" * 1024) + + +class TestAsyncGSFLoads(TestCase): + @async_test + async def test_loads_video(self): + (head, segments) = await loads(VIDEO_DATA) + + self.assertEqual(head['created'], datetime(2018, 2, 7, 10, 38, 22)) + self.assertEqual(head['id'], UUID('163fd9b7-bef4-4d92-8488-31f3819be008')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('c6a3d3ff-74c0-446d-b59e-de1041f27e8a')) + self.assertIn(head['segments'][0]['local_id'], segments) + self.assertEqual(len(segments[head['segments'][0]['local_id']]), head['segments'][0]['count']) + + ots = Timestamp(1420102800, 0) + for grain in segments[head['segments'][0]['local_id']]: + self.assertIsInstance(grain, VIDEOGRAIN) + self.assertEqual(grain.grain_type, "video") + self.assertEqual(grain.source_id, UUID('49578552-fb9e-4d3e-a197-3e3c437a895d')) + self.assertEqual(grain.flow_id, UUID('6e55f251-f75a-4d56-b3af-edb8b7993c3c')) + self.assertEqual(grain.origin_timestamp, ots) + ots += TimeOffset.from_nanosec(20000000) + + self.assertEqual(grain.format, CogFrameFormat.U8_420) + self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) + self.assertEqual(grain.width, 480) + self.assertEqual(grain.height, 270) + + self.assertEqual(len(grain.components), 3) + + self.assertEqual(grain.components[0].width, 480) + self.assertEqual(grain.components[0].height, 270) + self.assertEqual(grain.components[0].stride, 480) + self.assertEqual(grain.components[0].length, 480*270) + 
self.assertEqual(grain.components[0].offset, 0) + + self.assertEqual(grain.components[1].width, 240) + self.assertEqual(grain.components[1].height, 135) + self.assertEqual(grain.components[1].stride, 240) + self.assertEqual(grain.components[1].length, 240*135) + self.assertEqual(grain.components[1].offset, 480*270) + + self.assertEqual(grain.components[2].width, 240) + self.assertEqual(grain.components[2].height, 135) + self.assertEqual(grain.components[2].stride, 240) + self.assertEqual(grain.components[2].length, 240*135) + self.assertEqual(grain.components[2].offset, 480*270 + 240*135) + + self.assertEqual(len(grain.data), grain.components[0].length + grain.components[1].length + grain.components[2].length) + + @async_test + async def test_loads_audio(self): + (head, segments) = await loads(AUDIO_DATA) + + self.assertEqual(head['created'], datetime(2018, 2, 7, 10, 37, 50)) + self.assertEqual(head['id'], UUID('781fb6c5-d22f-4df5-ba69-69059efd5ced')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('fc4c5533-3fad-4437-93c0-8668cb876578')) + self.assertIn(head['segments'][0]['local_id'], segments) + self.assertEqual(len(segments[head['segments'][0]['local_id']]), head['segments'][0]['count']) + + start_ots = Timestamp(1420102800, 0) + ots = start_ots + total_samples = 0 + for grain in segments[head['segments'][0]['local_id']]: + self.assertIsInstance(grain, AUDIOGRAIN) + self.assertEqual(grain.grain_type, "audio") + self.assertEqual(grain.source_id, UUID('38bfd902-b35f-40d6-9ecf-dc95869130cf')) + self.assertEqual(grain.flow_id, UUID('f1c8c095-5739-46f4-9bbc-3d7050c9ba23')) + self.assertEqual(grain.origin_timestamp, ots) + + self.assertEqual(grain.format, CogAudioFormat.S24_INTERLEAVED) + self.assertEqual(grain.channels, 2) + self.assertEqual(grain.samples, 1024) + self.assertEqual(grain.sample_rate, 48000) + + self.assertEqual(len(grain.data), 6144) + total_samples += grain.samples + ots = start_ots + 
TimeOffset.from_count(total_samples, grain.sample_rate) + + @async_test + async def test_loads_coded_video(self): + (head, segments) = await loads(CODED_VIDEO_DATA) + + self.assertEqual(head['created'], datetime(2018, 2, 7, 10, 38, 41)) + self.assertEqual(head['id'], UUID('8875f02c-2528-4566-9e9a-23efc3a9bbe5')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('bdfa1343-0a20-4a98-92f5-0f7f0eb75479')) + self.assertIn(head['segments'][0]['local_id'], segments) + self.assertEqual(len(segments[head['segments'][0]['local_id']]), head['segments'][0]['count']) + + ots = Timestamp(1420102800, 0) + unit_offsets = [ + ([0, 6, 34, 42, 711, 719], 36114), + ([0, 6, 14], 380), + ([0, 6, 14], 8277), + ([0, 6, 14], 4914), + ([0, 6, 14], 4961), + ([0, 6, 14], 3777), + ([0, 6, 14], 1950), + ([0, 6, 14], 31), + ([0, 6, 14], 25), + ([0, 6, 14], 6241)] + for grain in segments[head['segments'][0]['local_id']]: + self.assertIsInstance(grain, CODEDVIDEOGRAIN) + self.assertEqual(grain.grain_type, "coded_video") + self.assertEqual(grain.source_id, UUID('49578552-fb9e-4d3e-a197-3e3c437a895d')) + self.assertEqual(grain.flow_id, UUID('b6b05efb-6067-4ff8-afac-ec735a85674e')) + self.assertEqual(grain.origin_timestamp, ots) + ots += TimeOffset.from_nanosec(20000000) + + self.assertEqual(grain.format, CogFrameFormat.H264) + self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) + self.assertEqual(grain.origin_width, 1920) + self.assertEqual(grain.origin_height, 1080) + self.assertEqual(grain.coded_width, 0) + self.assertEqual(grain.coded_height, 0) + self.assertEqual(grain.length, unit_offsets[0][1]) + self.assertEqual(grain.temporal_offset, 0) + self.assertEqual(grain.unit_offsets, unit_offsets[0][0]) + unit_offsets.pop(0) + + @async_test + async def test_loads_rejects_incorrect_type_file(self): + with self.assertRaises(GSFDecodeBadFileTypeError) as cm: + await loads(b"POTATO23\x07\x00\x00\x00") + self.assertEqual(cm.exception.offset, 0) + 
self.assertEqual(cm.exception.filetype, "POTATO23") + + @async_test + async def test_loads_rejects_incorrect_version_file(self): + with self.assertRaises(GSFDecodeBadVersionError) as cm: + await loads(b"SSBBgrsg\x08\x00\x03\x00") + self.assertEqual(cm.exception.offset, 0) + self.assertEqual(cm.exception.major, 8) + self.assertEqual(cm.exception.minor, 3) + + @async_test + async def test_loads_rejects_bad_head_tag(self): + with self.assertRaises(GSFDecodeError) as cm: + await loads(b"SSBBgrsg\x07\x00\x00\x00" + + b"\xff\xff\xff\xff\x00\x00\x00\x00") + self.assertEqual(cm.exception.offset, 12) + + @async_test + async def test_loads_raises_exception_without_head(self): + with self.assertRaises(GSFDecodeError) as cm: + await loads(b"SSBBgrsg\x07\x00\x00\x00") + self.assertEqual(cm.exception.offset, 12) + + @async_test + async def test_loads_skips_unknown_block_before_head(self): + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + b"dumy\x08\x00\x00\x00" + + b"head\x1f\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f") + + self.assertEqual(head['id'], UUID('d19c0b91-1590-11e8-8580-dca904824eec')) + self.assertEqual(head['created'], datetime(1983, 3, 29, 15, 15, 15)) + self.assertEqual(head['segments'], []) + self.assertEqual(head['tags'], []) + + @async_test + async def test_loads_skips_unknown_block_instead_of_segm(self): + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + b"head\x27\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + b"dumy\x08\x00\x00\x00") + + self.assertEqual(head['id'], UUID('d19c0b91-1590-11e8-8580-dca904824eec')) + self.assertEqual(head['created'], datetime(1983, 3, 29, 15, 15, 15)) + self.assertEqual(head['segments'], []) + self.assertEqual(head['tags'], []) + + @async_test + async def test_loads_skips_unknown_block_before_segm(self): + (head, segments) = await 
loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x49\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"dumy\x08\x00\x00\x00") + + (b"segm\x22\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x00\x00\x00\x00\x00\x00\x00\x00"))) + + self.assertEqual(head['id'], UUID('d19c0b91-1590-11e8-8580-dca904824eec')) + self.assertEqual(head['created'], datetime(1983, 3, 29, 15, 15, 15)) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['local_id'], 1) + self.assertEqual(head['segments'][0]['id'], UUID('d3e191f0-1594-11e8-91ac-dca904824eec')) + self.assertEqual(head['segments'][0]['tags'], []) + self.assertEqual(head['segments'][0]['count'], 0) + self.assertEqual(head['tags'], []) + + @async_test + async def test_loads_raises_when_head_too_small(self): + with self.assertRaises(GSFDecodeError) as cm: + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x29\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"dumy\x08\x00\x00\x00") + + (b"segm\x22\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x00\x00\x00\x00\x00\x00\x00\x00"))) + + self.assertEqual(cm.exception.offset, 51) + + @async_test + async def test_loads_raises_when_segm_too_small(self): + with self.assertRaises(GSFDecodeError) as cm: + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x41\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"segm\x21\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x00\x00\x00\x00\x00\x00\x00\x00"))) + + self.assertEqual(cm.exception.offset, 77) + + @async_test + async def test_loads_decodes_tils(self): + src_id = 
UUID('c707d64c-1596-11e8-a3fb-dca904824eec') + flow_id = UUID('da78668a-1596-11e8-a577-dca904824eec') + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x41\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"segm\x22\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x01\x00\x00\x00\x00\x00\x00\x00")) + + (b"grai\x8d\x00\x00\x00" + + b"\x01\x00" + + (b"gbhd\x83\x00\x00\x00" + + src_id.bytes + + flow_id.bytes + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + (b"tils\x27\x00\x00\x00" + + b"\x01\x00" + + b"dummy timecode\x00\x00" + + b"\x07\x00\x00\x00" + + b"\x19\x00\x00\x00\x01\x00\x00\x00" + + b"\x00"))) + + (b"grai\x08\x00\x00\x00")) + + self.assertEqual(head['id'], UUID('d19c0b91-1590-11e8-8580-dca904824eec')) + self.assertEqual(head['created'], datetime(1983, 3, 29, 15, 15, 15)) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['local_id'], 1) + self.assertEqual(head['segments'][0]['id'], UUID('d3e191f0-1594-11e8-91ac-dca904824eec')) + self.assertEqual(head['segments'][0]['tags'], []) + self.assertEqual(head['segments'][0]['count'], 1) + self.assertEqual(head['tags'], []) + self.assertEqual(segments[1][0].timelabels, [{'tag': 'dummy timecode', 'timelabel': {'frames_since_midnight': 7, + 'frame_rate_numerator': 25, + 'frame_rate_denominator': 1, + 'drop_frame': False}}]) + + @async_test + async def test_loads_raises_when_grain_type_unknown(self): + with self.assertRaises(GSFDecodeError) as cm: + src_id = UUID('c707d64c-1596-11e8-a3fb-dca904824eec') + flow_id = UUID('da78668a-1596-11e8-a577-dca904824eec') + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + 
(b"head\x41\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"segm\x22\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x01\x00\x00\x00\x00\x00\x00\x00")) + + (b"grai\x8d\x00\x00\x00" + + b"\x01\x00" + + (b"gbhd\x83\x00\x00\x00" + + src_id.bytes + + flow_id.bytes + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + (b"dumy\x08\x00\x00\x00")))) + + self.assertEqual(cm.exception.offset, 179) + + @async_test + async def test_loads_decodes_empty_grains(self): + src_id = UUID('c707d64c-1596-11e8-a3fb-dca904824eec') + flow_id = UUID('da78668a-1596-11e8-a577-dca904824eec') + (head, segments) = await loads(b"SSBBgrsg\x07\x00\x00\x00" + + (b"head\x41\x00\x00\x00" + + b"\xd1\x9c\x0b\x91\x15\x90\x11\xe8\x85\x80\xdc\xa9\x04\x82N\xec" + + b"\xbf\x07\x03\x1d\x0f\x0f\x0f" + + (b"segm\x22\x00\x00\x00" + + b"\x01\x00" + + b"\xd3\xe1\x91\xf0\x15\x94\x11\xe8\x91\xac\xdc\xa9\x04\x82N\xec" + + b"\x02\x00\x00\x00\x00\x00\x00\x00")) + + (b"grai\x66\x00\x00\x00" + + b"\x01\x00" + + (b"gbhd\x5c\x00\x00\x00" + + src_id.bytes + + flow_id.bytes + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00")) + + (b"dumy\x08\x00\x00\x00") + + (b"grai\x6E\x00\x00\x00" + + b"\x01\x00" + + (b"gbhd\x5c\x00\x00\x00" + + src_id.bytes + + flow_id.bytes + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + + b"\x00\x00\x00\x00\x00\x00\x00\x00" + + 
b"\x00\x00\x00\x00\x00\x00\x00\x00") + + (b"grdt\x08\x00\x00\x00")) + + (b"dumy\x08\x00\x00\x00")) + + self.assertEqual(len(segments[1]), 2) + self.assertEqual(segments[1][0].grain_type, "empty") + self.assertIsNone(segments[1][0].data) + self.assertEqual(segments[1][1].grain_type, "empty") + self.assertIsNone(segments[1][1].data) + + @async_test + async def test_loads_coded_audio(self): + (head, segments) = await loads(CODED_AUDIO_DATA) + + self.assertEqual(head['created'], datetime(2018, 2, 7, 10, 38, 5)) + self.assertEqual(head['id'], UUID('2dbc5889-15f1-427c-b727-5201dd3b053c')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('6ca3a217-f2c2-4344-832b-6ea87bc5ddb8')) + self.assertIn(head['segments'][0]['local_id'], segments) + self.assertEqual(len(segments[head['segments'][0]['local_id']]), head['segments'][0]['count']) + + start_ots = Timestamp(1420102800, 0) + ots = start_ots + total_samples = 0 + lengths = [603, + 690, + 690, + 689, + 690, + 690, + 689, + 690, + 690, + 689] + for grain in segments[head['segments'][0]['local_id']]: + self.assertIsInstance(grain, CODEDAUDIOGRAIN) + self.assertEqual(grain.grain_type, "coded_audio") + self.assertEqual(grain.source_id, UUID('38bfd902-b35f-40d6-9ecf-dc95869130cf')) + self.assertEqual(grain.flow_id, UUID('e615296b-ff40-4d95-8398-6a4082305f3a')) + self.assertEqual(grain.origin_timestamp, ots) + + self.assertEqual(grain.format, CogAudioFormat.AAC) + self.assertEqual(grain.channels, 2) + self.assertEqual(grain.samples, 1024) + self.assertEqual(grain.priming, 0) + self.assertEqual(grain.remainder, 0) + self.assertEqual(grain.sample_rate, 48000) + + self.assertEqual(len(grain.data), lengths[0]) + lengths.pop(0) + total_samples += grain.samples + ots = start_ots + TimeOffset.from_count(total_samples, grain.sample_rate) + + @async_test + async def test_loads_event(self): + self.maxDiff = None + (head, segments) = await loads(EVENT_DATA) + + self.assertEqual(head['created'], 
datetime(2018, 2, 7, 10, 37, 35)) + self.assertEqual(head['id'], UUID('3c45f8b5-1853-4723-808a-ab5cbf598ccc')) + self.assertEqual(len(head['segments']), 1) + self.assertEqual(head['segments'][0]['id'], UUID('db095cb5-050b-4b8c-92e8-31351422e93a')) + self.assertIn(head['segments'][0]['local_id'], segments) + self.assertEqual(len(segments[head['segments'][0]['local_id']]), head['segments'][0]['count']) + + start_ots = Timestamp(1447176512, 400000000) + ots = start_ots + line = '' + seqnum = 3107787894242499264 + for grain in segments[head['segments'][0]['local_id']]: + self.assertIsInstance(grain, EVENTGRAIN) + self.assertEqual(grain.grain_type, "event") + self.assertEqual(grain.source_id, UUID('2db4268e-82ef-49f9-bc0f-1726e8352d76')) + self.assertEqual(grain.flow_id, UUID('5333bae9-0768-4e31-be1c-fbd5dc2e34ac')) + self.assertEqual(grain.origin_timestamp, ots) + + self.assertEqual(grain.event_type, 'urn:x-ipstudio:format:event.ttext.ebuttlive') + self.assertEqual(grain.topic, '') + self.assertEqual(len(grain.event_data), 1) + self.assertEqual(grain.event_data[0].path, 'Subs') + self.assertEqual(grain.event_data[0].pre, line) + line = '\nv1.0140gb' + ots.to_iso8601_utc() + '' # NOQA + self.assertEqual(grain.event_data[0].post, line, msg="\n\nExpected:\n\n{!r}\n\nGot:\n\n{!r}\n\n".format(line, grain.event_data[0].post)) + + ots = ots + TimeOffset.from_nanosec(20000000) + seqnum += 20000000 diff --git a/tests/test36_numpy_videograin.py b/tests/test36_numpy_videograin.py new file mode 100644 index 0000000..5157437 --- /dev/null +++ b/tests/test36_numpy_videograin.py @@ -0,0 +1,703 @@ +#!/usr/bin/python +# +# Copyright 2018 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from unittest import TestCase + +import uuid +from mediagrains.numpy import VideoGrain, VIDEOGRAIN +from mediagrains_py36.numpy.videograin import _dtype_from_cogframeformat +from mediagrains.cogenums import ( + CogFrameFormat, + CogFrameLayout, + COG_FRAME_FORMAT_BYTES_PER_VALUE, + COG_FRAME_FORMAT_H_SHIFT, + COG_FRAME_FORMAT_V_SHIFT, + COG_FRAME_IS_PACKED, + COG_FRAME_IS_COMPRESSED, + COG_FRAME_IS_PLANAR, + COG_FRAME_IS_PLANAR_RGB, + COG_FRAME_FORMAT_ACTIVE_BITS, + COG_PLANAR_FORMAT, + PlanarChromaFormat) +from mediatimestamp.immutable import Timestamp, TimeRange +import mock +from fractions import Fraction +from copy import copy, deepcopy +from typing import Tuple, Optional + +from itertools import chain, repeat + +import numpy as np + +from pdb import set_trace + + +class TestGrain (TestCase): + def _get_bitdepth(self, fmt): + if COG_FRAME_IS_PLANAR(fmt): + return COG_FRAME_FORMAT_ACTIVE_BITS(fmt) + elif fmt in [CogFrameFormat.UYVY, + CogFrameFormat.YUYV, + CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + return 8 + elif fmt == CogFrameFormat.v216: + return 16 + elif fmt == CogFrameFormat.v210: + return 10 + else: + raise Exception() + + def _get_hs_vs_and_bps(self, fmt): + if COG_FRAME_IS_PLANAR(fmt): + return (COG_FRAME_FORMAT_H_SHIFT(fmt), COG_FRAME_FORMAT_V_SHIFT(fmt), COG_FRAME_FORMAT_BYTES_PER_VALUE(fmt)) + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV]: + return (1, 0, 1) + elif 
fmt in [CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + return (0, 0, 1) + elif fmt == CogFrameFormat.v216: + return (1, 0, 2) + elif fmt == CogFrameFormat.v210: + return (1, 0, 4) + else: + raise Exception() + + def _is_rgb(self, fmt): + if COG_FRAME_IS_PLANAR(fmt): + return COG_FRAME_IS_PLANAR_RGB(fmt) + elif fmt in [CogFrameFormat.UYVY, + CogFrameFormat.YUYV, + CogFrameFormat.v216, + CogFrameFormat.v210]: + return False + elif fmt in [CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + return True + else: + raise Exception() + + def assertComponentsAreModifiable(self, grain): + width = grain.width + height = grain.height + fmt = grain.format + + (hs, vs, _) = self._get_hs_vs_and_bps(fmt) + + # Test that changes to the component arrays are reflected in the main data array + for y in range(0, 16): + for x in range(0, 16): + grain.component_data[0][x, y] = (y*16 + x) & 0x3F + + for y in range(0, 16 >> vs): + for x in range(0, 16 >> hs): + grain.component_data[1][x, y] = (y*16 + x) & 0x3F + 0x40 + grain.component_data[2][x, y] = (y*16 + x) & 0x3F + 0x50 + + if COG_FRAME_IS_PLANAR(fmt): + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width + x], (y*16 + x) & 0x3F) + + for y in range(0, 16 >> vs): + for x in range(0, 16 >> hs): + self.assertEqual(grain.data[width*height + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[width*height + (width >> hs)*(height >> vs) + y*(width >> hs) + x], (y*16 + x) & 0x3F + 0x50) + + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.v216]: + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + x) & 0x3F + 0x40) + 
self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + 2*x + 0) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) + self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + 2*x + 1) & 0x3F) + + elif fmt == CogFrameFormat.YUYV: + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[y*width*2 + 4*x + 0], (y*16 + 2*x + 0) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*2 + 4*x + 2], (y*16 + 2*x + 1) & 0x3F) + self.assertEqual(grain.data[y*width*2 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) + + elif fmt == CogFrameFormat.RGB: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*3 + 3*x + 0], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*3 + 3*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*3 + 3*x + 2], (y*16 + x) & 0x3F + 0x50) + + elif fmt in [CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx]: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*4 + 4*x + 0], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x50) + + elif fmt in [CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*width*4 + 4*x + 1], (y*16 + x) & 0x3F) + self.assertEqual(grain.data[y*width*4 + 4*x + 2], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[y*width*4 + 4*x + 3], (y*16 + x) & 0x3F + 0x50) + + else: + raise Exception() + + def assertIsVideoGrain(self, + fmt, + src_id=uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429"), + flow_id=uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb"), + ots=Timestamp.from_tai_sec_nsec("417798915:5"), + 
sts=Timestamp.from_tai_sec_nsec("417798915:10"), + cts=Timestamp.from_tai_sec_nsec("417798915:0"), + rate=Fraction(25, 1), + width=1920, + height=1080, + ignore_cts=False): + def __inner(grain): + self.assertEqual(grain.grain_type, "video") + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ots) + self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) + self.assertEqual(grain.sync_timestamp, sts) + if not ignore_cts: + self.assertEqual(grain.creation_timestamp, cts) + self.assertEqual(grain.rate, rate) + self.assertEqual(grain.duration, 1/rate) + self.assertEqual(grain.timelabels, []) + self.assertEqual(grain.format, fmt) + self.assertEqual(grain.width, width) + self.assertEqual(grain.height, height) + self.assertEqual(grain.layout, CogFrameLayout.FULL_FRAME) + self.assertEqual(grain.extension, 0) + self.assertIsNone(grain.source_aspect_ratio) + self.assertIsNone(grain.pixel_aspect_ratio) + + (hs, vs, bps) = self._get_hs_vs_and_bps(fmt) + + if COG_FRAME_IS_PLANAR(fmt): + self.assertEqual(len(grain.components), 3) + self.assertEqual(grain.components[0].stride, width*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, width*height*bps) + self.assertEqual(len(grain.components[0]), 5) + + self.assertEqual(grain.components[1].stride, width*bps >> hs) + self.assertEqual(grain.components[1].width, width >> hs) + self.assertEqual(grain.components[1].height, height >> vs) + self.assertEqual(grain.components[1].offset, width*height*bps) + self.assertEqual(grain.components[1].length, width*height*bps >> (hs + vs)) + self.assertEqual(len(grain.components[1]), 5) + + self.assertEqual(grain.components[2].stride, width*bps >> hs) + 
self.assertEqual(grain.components[2].width, width >> hs) + self.assertEqual(grain.components[2].height, height >> vs) + self.assertEqual(grain.components[2].offset, width*height*bps + (width*height*bps >> (hs + vs))) + self.assertEqual(grain.components[2].length, width*height*bps >> (hs + vs)) + self.assertEqual(len(grain.components[2]), 5) + + self.assertEqual(grain.expected_length, (width*height + 2*(width >> hs)*(height >> vs))*bps) + elif fmt in [CogFrameFormat.UYVY, CogFrameFormat.YUYV]: + self.assertEqual(len(grain.components), 1) + self.assertEqual(grain.components[0].stride, width*bps + 2*(width >> hs)*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, width*height*bps*2) + self.assertEqual(len(grain.components[0]), 5) + + self.assertEqual(grain.expected_length, width*height*bps*2) + elif fmt in [CogFrameFormat.RGB]: + self.assertEqual(len(grain.components), 1) + self.assertEqual(grain.components[0].stride, 3*width*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, width*height*bps*3) + self.assertEqual(len(grain.components[0]), 5) + elif fmt in [CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR]: + self.assertEqual(len(grain.components), 1) + self.assertEqual(grain.components[0].stride, 4*width*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, width*height*bps*4) + self.assertEqual(len(grain.components[0]), 5) + + elif fmt == CogFrameFormat.v216: + 
self.assertEqual(len(grain.components), 1) + self.assertEqual(grain.components[0].stride, 2*width*bps) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, width*height*bps*2) + self.assertEqual(len(grain.components[0]), 5) + + elif fmt == CogFrameFormat.v210: + self.assertEqual(len(grain.components), 1) + self.assertEqual(grain.components[0].stride, (((width + 47) // 48) * 128)) + self.assertEqual(grain.components[0].width, width) + self.assertEqual(grain.components[0].height, height) + self.assertEqual(grain.components[0].offset, 0) + self.assertEqual(grain.components[0].length, height*(((width + 47) // 48) * 128)) + self.assertEqual(len(grain.components[0]), 5) + + else: + raise Exception() + + if bps == 1: + dtype = np.dtype(np.uint8) + elif bps == 2: + dtype = np.dtype(np.uint16) + elif bps == 4: + dtype = np.dtype(np.uint32) + else: + raise Exception() + + self.assertIsInstance(grain.data, np.ndarray) + self.assertEqual(grain.data.nbytes, grain.expected_length) + self.assertEqual(grain.data.dtype, dtype) + self.assertEqual(grain.data.size, grain.expected_length//bps) + self.assertEqual(grain.data.itemsize, bps) + self.assertEqual(grain.data.ndim, 1) + self.assertEqual(grain.data.shape, (grain.expected_length//bps,)) + + self.assertEqual(repr(grain), "VideoGrain({!r},< numpy data of length {} >)".format(grain.meta, len(grain.data))) + + if fmt == CogFrameFormat.v210: + # V210 is barely supported. Convert it to something else to actually use it! 
+ self.assertEqual(len(grain.component_data), 0) + else: + self.assertIsInstance(grain.component_data[0], np.ndarray) + self.assertTrue(np.array_equal(grain.component_data[0].nbytes, width*height*bps)) + self.assertTrue(np.array_equal(grain.component_data[0].dtype, dtype)) + self.assertTrue(np.array_equal(grain.component_data[0].size, width*height)) + self.assertTrue(np.array_equal(grain.component_data[0].itemsize, bps)) + self.assertTrue(np.array_equal(grain.component_data[0].ndim, 2)) + self.assertTrue(np.array_equal(grain.component_data[0].shape, (width, height))) + + self.assertIsInstance(grain.component_data[1], np.ndarray) + self.assertTrue(np.array_equal(grain.component_data[1].nbytes, width*height*bps >> (hs + vs))) + self.assertTrue(np.array_equal(grain.component_data[1].dtype, dtype)) + self.assertTrue(np.array_equal(grain.component_data[1].size, width*height >> (hs + vs))) + self.assertTrue(np.array_equal(grain.component_data[1].itemsize, bps)) + self.assertTrue(np.array_equal(grain.component_data[1].ndim, 2)) + self.assertTrue(np.array_equal(grain.component_data[1].shape, (width >> hs, height >> vs))) + + self.assertIsInstance(grain.component_data[2], np.ndarray) + self.assertTrue(np.array_equal(grain.component_data[2].nbytes, width*height*bps >> (hs + vs))) + self.assertTrue(np.array_equal(grain.component_data[2].dtype, dtype)) + self.assertTrue(np.array_equal(grain.component_data[2].size, width*height >> (hs + vs))) + self.assertTrue(np.array_equal(grain.component_data[2].itemsize, bps)) + self.assertTrue(np.array_equal(grain.component_data[2].ndim, 2)) + self.assertTrue(np.array_equal(grain.component_data[2].shape, (width >> hs, height >> vs))) + + return __inner + + def _test_pattern_rgb(self, fmt: CogFrameFormat) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + """Return a 16x16 pixel RGB test pattern""" + bd = self._get_bitdepth(fmt) + + v = (1 << (bd - 2))*3 + return (np.array([[v, v, v, v, 0, 0, 0, 0, v, v, v, v, 0, 0, 0, 0] for _ in range(0, 
16)], dtype=_dtype_from_cogframeformat(fmt)).transpose(), + np.array([[v, v, v, v, v, v, v, v, 0, 0, 0, 0, 0, 0, 0, 0] for _ in range(0, 16)], dtype=_dtype_from_cogframeformat(fmt)).transpose(), + np.array([[v, v, 0, 0, v, v, 0, 0, v, v, 0, 0, v, v, 0, 0] for _ in range(0, 16)], dtype=_dtype_from_cogframeformat(fmt)).transpose()) + + def _test_pattern_yuv(self, fmt: CogFrameFormat) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + (R, G, B) = self._test_pattern_rgb(fmt) + (R, G, B) = (R.astype(np.dtype(np.double)), + G.astype(np.dtype(np.double)), + B.astype(np.dtype(np.double))) + bd = self._get_bitdepth(fmt) + (hs, vs, _) = self._get_hs_vs_and_bps(fmt) + + Y = (R*0.2126 + G*0.7152 + B*0.0722) + U = (R*-0.114572 - G*0.385428 + B*0.5 + (1 << (bd - 1))) + V = (R*0.5 - G*0.454153 - B*0.045847 + (1 << (bd - 1))) + + if hs == 1: + U = (U[0::2, :] + U[1::2, :])/2 + V = (V[0::2, :] + V[1::2, :])/2 + if vs == 1: + U = (U[:, 0::2] + U[:, 1::2])/2 + V = (V[:, 0::2] + V[:, 1::2])/2 + + return (np.around(Y).astype(_dtype_from_cogframeformat(fmt)), np.around(U).astype(_dtype_from_cogframeformat(fmt)), np.around(V).astype(_dtype_from_cogframeformat(fmt))) + + def _test_pattern_v210(self) -> np.ndarray: + (Y, U, V) = self._test_pattern_yuv(CogFrameFormat.S16_422_10BIT) + + output = np.zeros(32*16, dtype=np.dtype(np.uint32)) + for y in range(0, 16): + + yy = chain(iter(Y[:, y]), repeat(0)) + uu = chain(iter(U[:, y]), repeat(0)) + vv = chain(iter(V[:, y]), repeat(0)) + + for x in range(0, 8): + output[y*32 + 4*x + 0] = next(uu) | (next(yy) << 10) | (next(vv) << 20) + output[y*32 + 4*x + 1] = next(yy) | (next(uu) << 10) | (next(yy) << 20) + output[y*32 + 4*x + 2] = next(vv) | (next(yy) << 10) | (next(uu) << 20) + output[y*32 + 4*x + 3] = next(yy) | (next(vv) << 10) | (next(yy) << 20) + + return output + + + def write_test_pattern(self, grain): + fmt = grain.format + + if self._is_rgb(fmt): + (R, G, B) = self._test_pattern_rgb(fmt) + + grain.component_data.R[:, :] = R + 
grain.component_data.G[:, :] = G + grain.component_data.B[:, :] = B + elif fmt == CogFrameFormat.v210: + grain.data[:] = self._test_pattern_v210() + else: + (Y, U, V) = self._test_pattern_yuv(fmt) + + grain.component_data.Y[:, :] = Y + grain.component_data.U[:, :] = U + grain.component_data.V[:, :] = V + + def assertArrayEqual(self, a: np.ndarray, b: np.ndarray, max_diff: Optional[int] = None): + if max_diff is None: + self.assertTrue(np.array_equal(a, b), msg="{} != {}".format(a, b)) + else: + a = a.astype(np.dtype(np.int64)) + b = b.astype(np.dtype(np.int64)) + self.assertTrue(np.amax(np.absolute(a - b)) <= max_diff, + msg="{} - {} = {} (allowing up to {} difference)".format(a, b, a - b, max_diff)) + + def assertMatchesTestPattern(self, grain: VIDEOGRAIN, max_diff: Optional[int] = None): + fmt = grain.format + + if self._is_rgb(fmt): + (R, G, B) = self._test_pattern_rgb(fmt) + + self.assertArrayEqual(grain.component_data.R[:, :], R, max_diff=max_diff) + self.assertArrayEqual(grain.component_data.G[:, :], G, max_diff=max_diff) + self.assertArrayEqual(grain.component_data.B[:, :], B, max_diff=max_diff) + elif fmt == CogFrameFormat.v210: + self.assertArrayEqual(grain.data, self._test_pattern_v210()) + else: + (Y, U, V) = self._test_pattern_yuv(fmt) + + self.assertArrayEqual(grain.component_data.Y[:, :], Y, max_diff=max_diff) + self.assertArrayEqual(grain.component_data.U[:, :], U, max_diff=max_diff) + self.assertArrayEqual(grain.component_data.V[:, :], V, max_diff=max_diff) + + def test_video_grain_create(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + for fmt in [CogFrameFormat.S32_444, + CogFrameFormat.S32_422, + CogFrameFormat.S32_420, + CogFrameFormat.S16_444_10BIT, + CogFrameFormat.S16_422_10BIT, + 
CogFrameFormat.S16_420_10BIT, + CogFrameFormat.S16_444_12BIT, + CogFrameFormat.S16_422_12BIT, + CogFrameFormat.S16_420_12BIT, + CogFrameFormat.S16_444, + CogFrameFormat.S16_422, + CogFrameFormat.S16_420, + CogFrameFormat.U8_444, + CogFrameFormat.U8_422, + CogFrameFormat.U8_420, + CogFrameFormat.U8_444_RGB, + CogFrameFormat.S16_444_RGB, + CogFrameFormat.S16_444_12BIT_RGB, + CogFrameFormat.S16_444_10BIT_RGB, + CogFrameFormat.UYVY, + CogFrameFormat.YUYV, + CogFrameFormat.RGB, + CogFrameFormat.RGBx, + CogFrameFormat.RGBA, + CogFrameFormat.BGRx, + CogFrameFormat.BGRx, + CogFrameFormat.ARGB, + CogFrameFormat.xRGB, + CogFrameFormat.ABGR, + CogFrameFormat.xBGR, + CogFrameFormat.v216, + CogFrameFormat.v210]: + with self.subTest(fmt=fmt): + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=fmt, + width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertIsVideoGrain(fmt)(grain) + + if fmt is not CogFrameFormat.v210: + self.assertComponentsAreModifiable(grain) + + def test_video_grain_convert(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + + def pairs_from(fmts): + for fmt_in in fmts: + for fmt_out in fmts: + yield (fmt_in, fmt_out) + + fmts = [CogFrameFormat.YUYV, CogFrameFormat.UYVY, CogFrameFormat.U8_444, CogFrameFormat.U8_422, CogFrameFormat.U8_420, # All YUV 8bit formats + CogFrameFormat.RGB, CogFrameFormat.U8_444_RGB, CogFrameFormat.RGBx, CogFrameFormat.xRGB, CogFrameFormat.BGRx, CogFrameFormat.xBGR, # All 8-bit 3 component RGB formats + CogFrameFormat.v216, CogFrameFormat.S16_444, CogFrameFormat.S16_422, CogFrameFormat.S16_420, # All YUV 16bit formats + CogFrameFormat.S16_444_10BIT, 
CogFrameFormat.S16_422_10BIT, CogFrameFormat.S16_420_10BIT, # All YUV 10bit formats except for v210 + CogFrameFormat.v210, # v210, may the gods be merciful to us for including it + CogFrameFormat.S16_444_12BIT, CogFrameFormat.S16_422_12BIT, CogFrameFormat.S16_420_12BIT, # All YUV 12bit formats + CogFrameFormat.S32_444, CogFrameFormat.S32_422, CogFrameFormat.S32_420, # All YUV 32bit formats + CogFrameFormat.S16_444_RGB, CogFrameFormat.S16_444_10BIT_RGB, CogFrameFormat.S16_444_12BIT_RGB, CogFrameFormat.S32_444_RGB] # Other planar RGB formats + for (fmt_in, fmt_out) in pairs_from(fmts): + with self.subTest(fmt_in=fmt_in, fmt_out=fmt_out): + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain_in = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=fmt_in, + width=16, height=16, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertIsVideoGrain(fmt_in, width=16, height=16)(grain_in) + self.write_test_pattern(grain_in) + + grain_out = grain_in.convert(fmt_out) + + if fmt_in != fmt_out: + flow_id_out = grain_in.flow_id_for_converted_flow(fmt_out) + else: + flow_id_out = flow_id + self.assertIsVideoGrain(fmt_out, flow_id=flow_id_out, width=16, height=16, ignore_cts=True)(grain_out) + + # Some conversions for v210 are just really hard to check when not exact + # For other formats it's simpler + if fmt_out != CogFrameFormat.v210: + # We have several possible cases here: + # * We've changed bit-depth + # * We've changed colour subsampling + # * We've changed colourspace + # + # In addition we may have done none of those things, or even more than one + + # If we've increased bit-depth there will be rounding errors + if self._get_bitdepth(fmt_out) > self._get_bitdepth(fmt_in): + self.assertMatchesTestPattern(grain_out, max_diff=1 << (self._get_bitdepth(fmt_out) + 2 - self._get_bitdepth(fmt_in))) + + # If we're changing from yuv to rgb then there's some potential for floating point errors, depending on the sizes + 
elif self._get_bitdepth(fmt_in) >= 16 and not self._is_rgb(fmt_in) and fmt_out == CogFrameFormat.S16_444_RGB: + self.assertMatchesTestPattern(grain_out, max_diff=2) + elif self._get_bitdepth(fmt_in) == 32 and not self._is_rgb(fmt_in) and fmt_out == CogFrameFormat.S32_444_RGB: + self.assertMatchesTestPattern(grain_out, max_diff=1 << 10) # The potential errors in 32 bit conversions are very large + + # If we've decreased bit-depth and/or changed from rgb to yuv then there is a smaller scope for error + elif ((self._get_bitdepth(fmt_out) < self._get_bitdepth(fmt_in)) or + (self._is_rgb(fmt_in) != self._is_rgb(fmt_out))): + self.assertMatchesTestPattern(grain_out, max_diff=1) + + # If we're in none of these cases then the transformation should be lossless + else: + self.assertMatchesTestPattern(grain_out) + else: + grain_rev = grain_out.convert(fmt_in) + + # The conversion from 10-bit 422 should be lossless + if fmt_in in [CogFrameFormat.v210, CogFrameFormat.S16_422_10BIT]: + self.assertMatchesTestPattern(grain_rev) + + # If we are not colour space converting and our input bit-depth is equal to or lower than 10 bits we have minor scope for rounding error + elif self._get_bitdepth(fmt_in) in [8, 10] and not self._is_rgb(fmt_in): + self.assertMatchesTestPattern(grain_rev, max_diff=1) + + # If we are significantly lowering the bit depth then there is potential for significant error when reversing the process + elif self._get_bitdepth(fmt_in) in [12, 16, 32] and not self._is_rgb(fmt_in): + self.assertMatchesTestPattern(grain_rev, max_diff=1 << (self._get_bitdepth(fmt_in) - 9)) + + # And even more if we are also colour converting + elif self._get_bitdepth(fmt_in) in [12, 16, 32] and self._is_rgb(fmt_in): + self.assertMatchesTestPattern(grain_rev, max_diff=1 << (self._get_bitdepth(fmt_in) - 8)) + + # Otherwise if we are only colour converting then the potential error is a small floating point rounding error + elif self._is_rgb(fmt_in): + self.assertMatchesTestPattern(grain_rev, 
max_diff=4) + + + def test_video_grain_create_discontiguous(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + data = bytearray(11*1024*1024) + + grain = VideoGrain({ + "grain": { + "grain_type": "video", + "source_id": src_id, + "flow_id": flow_id, + "origin_timestamp": ots, + "sync_timestamp": sts, + "creation_timestamp": cts, + "rate": { + "numerator": 25, + "denominator": 1, + }, + "duration": { + "numerator": 1, + "denominator": 25, + }, + "cog_frame": { + "format": CogFrameFormat.S16_422_10BIT, + "width": 1920, + "height": 1080, + "layout": CogFrameLayout.FULL_FRAME, + "extension": 0, + "components": [ + { + "stride": 4096, + "width": 1920, + "height": 1080, + "length": 4423680, + "offset": 0 + }, + { + "stride": 2048, + "width": 960, + "height": 1080, + "length": 2211840, + "offset": 5*1024*1024 + }, + { + "stride": 2048, + "width": 960, + "height": 1080, + "length": 2211840, + "offset": 8*1024*1024 + }, + ] + } + } + }, data) + + for y in range(0, 16): + for x in range(0, 16): + grain.component_data[0][x, y] = (y*16 + x) & 0x3F + + for y in range(0, 16): + for x in range(0, 8): + grain.component_data[1][x, y] = (y*16 + x) & 0x3F + 0x40 + grain.component_data[2][x, y] = (y*16 + x) & 0x3F + 0x50 + + for y in range(0, 16): + for x in range(0, 16): + self.assertEqual(grain.data[y*grain.components[0].stride//2 + x], (y*16 + x) & 0x3F) + + for y in range(0, 16): + for x in range(0, 8): + self.assertEqual(grain.data[grain.components[1].offset//2 + y*grain.components[1].stride//2 + x], (y*16 + x) & 0x3F + 0x40) + self.assertEqual(grain.data[grain.components[2].offset//2 + y*grain.components[2].stride//2 + x], (y*16 + x) & 0x3F + 0x50) + + def test_copy(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = 
uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=CogFrameFormat.S16_422_10BIT, + width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + grain.data[0] = 0x1BBC + + clone = copy(grain) + + self.assertEqual(grain.data[0], clone.data[0]) + + grain.data[0] = 0xCAFE + + self.assertEqual(grain.data[0], clone.data[0]) + + def test_deepcopy(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + cts = Timestamp.from_tai_sec_nsec("417798915:0") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + sts = Timestamp.from_tai_sec_nsec("417798915:10") + + with mock.patch.object(Timestamp, "get_time", return_value=cts): + grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, sync_timestamp=sts, + cog_frame_format=CogFrameFormat.S16_422_10BIT, + width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + grain.data[0] = 0x1BBC + + clone = deepcopy(grain) + + self.assertEqual(grain.data[0], clone.data[0]) + + grain.data[0] = 0xCAFE + + self.assertNotEqual(grain.data[0], clone.data[0]) diff --git a/tests/test_grain.py b/tests/test_grain.py index 5edb4e3..db8f611 100644 --- a/tests/test_grain.py +++ b/tests/test_grain.py @@ -20,7 +20,7 @@ import uuid from mediagrains import Grain, VideoGrain, AudioGrain, CodedVideoGrain, CodedAudioGrain, EventGrain from mediagrains.cogenums import CogFrameFormat, CogFrameLayout, CogAudioFormat -from mediatimestamp.immutable import Timestamp, TimeOffset +from mediatimestamp.immutable import Timestamp, TimeOffset, TimeRange import mock from fractions import Fraction import json @@ -41,6 +41,7 @@ def 
test_empty_grain_creation(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(cts)) self.assertEqual(grain.sync_timestamp, cts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(0, 1)) @@ -88,6 +89,7 @@ def test_empty_grain_creation_with_odd_data(self): self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.source_id, src_id) self.assertEqual(grain.flow_id, flow_id) @@ -110,6 +112,7 @@ def test_empty_grain_creation_with_ots(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, ots) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(0, 1)) @@ -131,6 +134,7 @@ def test_empty_grain_creation_with_ots_and_sts(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(0, 1)) @@ -200,6 +204,7 @@ def test_empty_grain_with_meta(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) 
self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -246,6 +251,7 @@ def test_empty_grain_setters(self): grain.origin_timestamp = ots self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) grain.sync_timestamp = sts self.assertEqual(grain.sync_timestamp, sts) @@ -329,6 +335,7 @@ def test_video_grain_create_YUV422_10bit(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -539,6 +546,7 @@ def test_video_grain_with_numeric_identifiers(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -644,6 +652,7 @@ def test_video_grain_create_with_ots_and_no_sts(self): self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, ots) self.assertEqual(grain.creation_timestamp, cts) @@ -659,6 +668,7 @@ def test_video_grain_create_with_no_ots_and_no_sts(self): self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(cts)) self.assertEqual(grain.sync_timestamp, cts) 
self.assertEqual(grain.creation_timestamp, cts) @@ -808,6 +818,28 @@ def test_grain_makes_videograin_without_data(self): self.assertEqual(grain.length, 0) self.assertEqual(grain.expected_length, 8192*1080) + def test_video_grain_normalise(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + + with mock.patch.object(Timestamp, "get_time", return_value=ots): + grain = VideoGrain(src_id, flow_id, origin_timestamp=ots, + rate=Fraction(25, 1), + cog_frame_format=CogFrameFormat.S16_422_10BIT, + width=1920, height=1080, cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertEqual(grain.origin_timestamp, ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timestamp), + ots) + self.assertEqual(grain.normalise_time(grain.origin_timestamp), + ots.normalise(25, 1)) + self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange.from_single_timestamp(ots)) + self.assertEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange.from_single_timestamp(ots).normalise(25, 1)) + def test_audio_grain_create_S16_PLANES(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") @@ -825,6 +857,7 @@ def test_audio_grain_create_S16_PLANES(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(ots, ots + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -857,6 +890,7 @@ def test_audio_grain_create_fills_in_missing_sts(self): self.assertEqual(grain.flow_id, flow_id) 
self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(ots, ots + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, ots) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -887,6 +921,7 @@ def test_audio_grain_create_fills_in_missing_ots(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(cts, cts + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, cts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1033,6 +1068,29 @@ def test_grain_makes_audiograin(self): self.assertEqual(grain.meta, meta) self.assertEqual(grain.data, data) + def test_audio_grain_normalise(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + ots = Timestamp.from_tai_sec_nsec("417798915:2") + + with mock.patch.object(Timestamp, "get_time", return_value=ots): + grain = AudioGrain(src_id, flow_id, + cog_audio_format=CogAudioFormat.S16_PLANES, + channels=2, samples=1920, sample_rate=48000) + + final_ts = ots + TimeOffset.from_count(1920 - 1, 48000, 1) + + self.assertEqual(grain.origin_timestamp, ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timestamp), + ots) + self.assertEqual(grain.normalise_time(grain.origin_timestamp), + ots.normalise(48000, 1)) + self.assertEqual(grain.final_origin_timestamp(), final_ts) + self.assertNotEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange(ots, final_ts)) + self.assertEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange(ots, final_ts).normalise(48000, 1)) + def 
test_coded_video_grain_create_VC2(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") @@ -1297,6 +1355,29 @@ def test_grain_makes_codedvideograin(self): self.assertEqual(grain.meta, meta) self.assertEqual(grain.data, data) + def test_coded_video_grain_normalise(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + ots = Timestamp.from_tai_sec_nsec("417798915:5") + + with mock.patch.object(Timestamp, "get_time", return_value=ots): + grain = CodedVideoGrain(src_id, flow_id, origin_timestamp=ots, + rate=Fraction(25, 1), + cog_frame_format=CogFrameFormat.VC2, + origin_width=1920, origin_height=1080, + cog_frame_layout=CogFrameLayout.FULL_FRAME) + + self.assertEqual(grain.origin_timestamp, ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timestamp), + ots) + self.assertEqual(grain.normalise_time(grain.origin_timestamp), + ots.normalise(25, 1)) + self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange.from_single_timestamp(ots)) + self.assertEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange.from_single_timestamp(ots).normalise(25, 1)) + def test_coded_audio_grain_create_MP1(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") @@ -1319,6 +1400,7 @@ def test_coded_audio_grain_create_MP1(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(ots, ots + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1358,6 +1440,7 @@ def 
test_coded_audio_grain_create_without_sts(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(ots, ots + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, ots) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1396,6 +1479,7 @@ def test_coded_audio_grain_create_without_sts_or_ots(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts + TimeOffset.from_count(1919, 48000, 1)) + self.assertEqual(grain.origin_timerange(), TimeRange(cts, cts + TimeOffset.from_count(1919, 48000, 1))) self.assertEqual(grain.sync_timestamp, cts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1571,6 +1655,34 @@ def test_grain_makes_codedaudiograin(self): self.assertEqual(grain.meta, meta) self.assertEqual(grain.data, data) + def test_coded_audio_grain_normalise(self): + src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") + flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + ots = Timestamp.from_tai_sec_nsec("417798915:2") + + with mock.patch.object(Timestamp, "get_time", return_value=ots): + grain = CodedAudioGrain(src_id, flow_id, origin_timestamp=ots, + cog_audio_format=CogAudioFormat.MP1, + samples=1920, + channels=6, + priming=0, + remainder=0, + sample_rate=48000, + length=15360) + + final_ts = ots + TimeOffset.from_count(1920 - 1, 48000, 1) + + self.assertEqual(grain.origin_timestamp, ots) + self.assertNotEqual(grain.normalise_time(grain.origin_timestamp), + ots) + self.assertEqual(grain.normalise_time(grain.origin_timestamp), + ots.normalise(48000, 1)) + self.assertEqual(grain.final_origin_timestamp(), final_ts) + 
self.assertNotEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange(ots, final_ts)) + self.assertEqual(grain.normalise_time(grain.origin_timerange()), + TimeRange(ots, final_ts).normalise(48000, 1)) + def test_event_grain_create(self): src_id = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") flow_id = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") @@ -1587,6 +1699,7 @@ def test_event_grain_create(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1616,6 +1729,7 @@ def test_event_grain_create_without_sts(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, ots) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1641,6 +1755,7 @@ def test_event_grain_create_without_sts_or_ots(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(cts)) self.assertEqual(grain.sync_timestamp, cts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) @@ -1662,6 +1777,7 @@ def test_event_grain_create_fills_in_empty_meta(self): self.assertEqual(grain.grain_type, "event") self.assertEqual(grain.origin_timestamp, cts) self.assertEqual(grain.final_origin_timestamp(), cts) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(cts)) self.assertEqual(grain.sync_timestamp, 
cts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(0, 1)) @@ -1725,6 +1841,7 @@ def test_event_grain_create_from_meta_and_data(self): self.assertEqual(grain.flow_id, flow_id) self.assertEqual(grain.origin_timestamp, ots) self.assertEqual(grain.final_origin_timestamp(), ots) + self.assertEqual(grain.origin_timerange(), TimeRange.from_single_timestamp(ots)) self.assertEqual(grain.sync_timestamp, sts) self.assertEqual(grain.creation_timestamp, cts) self.assertEqual(grain.rate, Fraction(25, 1)) diff --git a/tests/test_psnr.py b/tests/test_psnr.py new file mode 100644 index 0000000..56f7981 --- /dev/null +++ b/tests/test_psnr.py @@ -0,0 +1,192 @@ +#!/usr/bin/python +# +# Copyright 2019 British Broadcasting Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from __future__ import print_function +from __future__ import absolute_import + +from unittest import TestCase +from sys import version_info +import uuid + +from mediagrains import VideoGrain +from mediagrains.cogenums import CogFrameFormat +from mediagrains.comparison import compute_psnr + +SRC_ID = uuid.UUID("f18ee944-0841-11e8-b0b0-17cef04bd429") +FLOW_ID = uuid.UUID("f79ce4da-0841-11e8-9a5b-dfedb11bafeb") + + +pixel_ranges = { + CogFrameFormat.U8_444: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.U8_422: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.U8_420: (1, (16, 235-16), (128, 224), (128, 224), 8), + CogFrameFormat.S16_444_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_422_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_420_10BIT: (2, (64, 940-64), (512, 896), (512, 896), 10), + CogFrameFormat.S16_444_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_422_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_420_12BIT: (2, (256, 3760-256), (2048, 3584), (2048, 3584), 12), + CogFrameFormat.S16_444: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), + CogFrameFormat.S16_422: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), + CogFrameFormat.S16_420: (2, (4096, 60160-4096), (32768, 57344), (32768, 57344), 16), +} + + +def _create_grain(cog_frame_format): + return VideoGrain(SRC_ID, FLOW_ID, + cog_frame_format=cog_frame_format, + width=480, height=270) + + +def _set_colour_bars(vg, noise_mask=0xffff): + """The code, except for the noise_mask, was copied from testsignalgenerator. It was duplicated here to keep + the unit tests isolated. 
+ + :params vg: A video GRAIN + :params noise_mask: A mask applied to the colour bar line pixels + """ + cog_frame_format = vg.format + intensity = 0.75 + + _bpp = pixel_ranges[cog_frame_format][0] + _steps = 8 + bs = 16 - pixel_ranges[cog_frame_format][4] + + values = [ + (int((0xFFFF >> bs) * intensity), 0x8000 >> bs, 0x8000 >> bs), + (int((0xE1FF >> bs) * intensity), 0x0000 >> bs, 0x9400 >> bs), + (int((0xB200 >> bs) * intensity), 0xABFF >> bs, 0x0000 >> bs), + (int((0x95FF >> bs) * intensity), 0x2BFF >> bs, 0x15FF >> bs), + (int((0x69FF >> bs) * intensity), 0xD400 >> bs, 0xEA00 >> bs), + (int((0x4C00 >> bs) * intensity), 0x5400 >> bs, 0xFFFF >> bs), + (int((0x1DFF >> bs) * intensity), 0xFFFF >> bs, 0x6BFF >> bs), + (int((0x0000 >> bs) * intensity), 0x8000 >> bs, 0x8000 >> bs)] + + lines = [bytearray(vg.components[0].width*_bpp), bytearray(vg.components[1].width*_bpp), bytearray(vg.components[2].width*_bpp)] + for c in range(0, 3): + for x in range(0, vg.components[c].width): + pos = x//(vg.components[c].width//_steps) + if _bpp == 1: + lines[c][x] = values[pos][c] & noise_mask + elif _bpp == 2: + lines[c][2*x + 0] = ((values[pos][c] & noise_mask) & 0xFF) + lines[c][2*x + 1] = ((values[pos][c] & noise_mask) >> 8) & 0xFF + + for c in range(0, 3): + for y in range(0, vg.components[c].height): + offset = vg.components[c].offset + y*vg.components[c].stride + vg.data[offset:offset + vg.components[c].width*_bpp] = lines[c] + + +def _convert_u8_uyvy(grain_u8): + grain_uyvy = _create_grain(CogFrameFormat.UYVY) + for y in range(0, grain_u8.height): + for x in range(0, grain_u8.width//2): + # U + grain_uyvy.data[y*grain_uyvy.components[0].stride + 4*x + 0] = grain_u8.data[grain_u8.components[1].offset + + y*grain_u8.components[1].stride + + x] + # Y + grain_uyvy.data[y*grain_uyvy.components[0].stride + 4*x + 1] = grain_u8.data[grain_u8.components[0].offset + + y*grain_u8.components[0].stride + + 2*x + 0] + # V + grain_uyvy.data[y*grain_uyvy.components[0].stride + 4*x + 2] 
= grain_u8.data[grain_u8.components[2].offset + + y*grain_u8.components[2].stride + + x] + # Y + grain_uyvy.data[y*grain_uyvy.components[0].stride + 4*x + 3] = grain_u8.data[grain_u8.components[0].offset + + y*grain_u8.components[0].stride + + 2*x + 1] + + return grain_uyvy + + +class TestPSNR(TestCase): + def _check_psnr_range(self, computed, ranges, max_diff): + for psnr, psnr_range in zip(computed, ranges): + if psnr < psnr_range - max_diff or psnr > psnr_range + max_diff: + return False + return True + + def _test_planar_format(self, cog_frame_format, expected): + grain_a = _create_grain(cog_frame_format) + _set_colour_bars(grain_a) + grain_b = _create_grain(cog_frame_format) + _set_colour_bars(grain_b, noise_mask=0xfffa) + + psnr = compute_psnr(grain_a, grain_b) + self.assertTrue(self._check_psnr_range(psnr, expected, 0.1), "{} != {}".format(psnr, expected)) + + def test_identical_data(self): + grain = _create_grain(CogFrameFormat.U8_422) + _set_colour_bars(grain, noise_mask=0xfa) + + self.assertEqual(compute_psnr(grain, grain), [float('Inf'), float('Inf'), float('Inf')]) + + def test_planar_8bit(self): + self._test_planar_format(CogFrameFormat.U8_422, [36.47984486113692, 39.45318336217709, 38.90095545159027]) + + def test_planar_10bit(self): + self._test_planar_format(CogFrameFormat.S16_422_10BIT, [48.8541475647564, 50.477799910245636, 50.477799910245636]) + + def test_planar_12bit(self): + self._test_planar_format(CogFrameFormat.S16_422_12BIT, [60.30687786176762, 62.525365357931186, 62.525365357931186]) + + def test_planar_16bit(self): + self._test_planar_format(CogFrameFormat.S16_422, [84.39126581514387, 86.60975331130743, 86.60975331130743]) + + def test_uyvy_format(self): + planar_grain_a = _create_grain(CogFrameFormat.U8_422) + _set_colour_bars(planar_grain_a) + grain_a = _convert_u8_uyvy(planar_grain_a) + + planar_grain_b = _create_grain(CogFrameFormat.U8_422) + _set_colour_bars(planar_grain_b, noise_mask=0xfffa) + grain_b = 
_convert_u8_uyvy(planar_grain_b) + + if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + psnr = compute_psnr(grain_a, grain_b) + expected = [36.47984486113692, 39.45318336217709, 38.90095545159027] + self.assertTrue(self._check_psnr_range(psnr, expected, 0.1), + "{} != {}".format(psnr, expected)) + else: + with self.assertRaises(NotImplementedError): + compute_psnr(grain_a, grain_b) + + def test_mixed_format(self): + grain_a = _create_grain(CogFrameFormat.U8_422) + _set_colour_bars(grain_a) + + planar_grain_b = _create_grain(CogFrameFormat.U8_422) + _set_colour_bars(planar_grain_b, noise_mask=0xfffa) + grain_b = _convert_u8_uyvy(planar_grain_b) + + if version_info[0] > 3 or (version_info[0] == 3 and version_info[1] >= 6): + psnr = compute_psnr(grain_a, grain_b) + expected = [36.47984486113692, 39.45318336217709, 38.90095545159027] + self.assertTrue(self._check_psnr_range(psnr, expected, 0.1), + "{} != {}".format(psnr, expected)) + else: + with self.assertRaises(NotImplementedError): + compute_psnr(grain_a, grain_b) + + def test_compressed_unsupported(self): + grain = _create_grain(CogFrameFormat.H264) + + with self.assertRaises(NotImplementedError): + compute_psnr(grain, grain) diff --git a/tests/test_testsignalgenerator.py b/tests/test_testsignalgenerator.py index 5a03279..6547fcb 100644 --- a/tests/test_testsignalgenerator.py +++ b/tests/test_testsignalgenerator.py @@ -28,7 +28,7 @@ from math import sin, pi from mediagrains.cogenums import CogFrameFormat, CogAudioFormat -from mediagrains.testsignalgenerator import LumaSteps, Tone1K, Silence +from mediagrains.testsignalgenerator import LumaSteps, Tone1K, Silence, ColourBars, MovingBarOverlay src_id = UUID("f2b6a9b4-2ea8-11e8-a468-878cf869cbec") @@ -344,6 +344,309 @@ def test_lumasteps_with_step_2(self): rate.numerator, rate.denominator) +class TestColourBars(TestCase): + colourbars_expected_values_8bit = [ + (0xFF, 0x80, 0x80), + (0xE1, 0x00, 0x94), + (0xB2, 0xAB, 0x00), + (0x95, 0x2B, 
0x15), + (0x69, 0xD4, 0xEA), + (0x4C, 0x54, 0xFF), + (0x1D, 0xFF, 0x6B), + (0x00, 0x80, 0x80)] + + colourbars_expected_values_10bit = [ + (0x3FF, 0x200, 0x200), + (0x387, 0x000, 0x250), + (0x2C8, 0x2AF, 0x000), + (0x257, 0x0AF, 0x057), + (0x1A7, 0x350, 0x3A8), + (0x130, 0x150, 0x3FF), + (0x077, 0x3FF, 0x1AF), + (0x000, 0x200, 0x200)] + + def test_colourbars100_u8_444(self): + """Testing that the ColourBars generator produces correct video frames + when the height is 4 lines and the width 240 pixels (to keep time taken + for testing under control""" + width = 240 + height = 4 + UUT = ColourBars(src_id, flow_id, + width, height, + intensity=1.0, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.U8_444, + rate=Fraction(25, 1), + step=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.U8_444) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = self.colourbars_expected_values_8bit + + for y in range(0, height): + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], expected[x//(width//8)][0]) + self.assertEqual(U[y*grain.components[1].stride + x], expected[x//(width//8)][1]) + self.assertEqual(V[y*grain.components[2].stride + x], expected[x//(width//8)][2]) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + def test_colourbars75_u8_444(self): + """Testing that the 
ColourBars generator produces correct video frames + when the height is 4 lines and the width 240 pixels (to keep time taken + for testing under control""" + width = 240 + height = 4 + UUT = ColourBars(src_id, flow_id, + width, height, + intensity=0.75, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.U8_444, + rate=Fraction(25, 1), + step=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.U8_444) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = self.colourbars_expected_values_8bit + + for y in range(0, height): + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], int(0.75*expected[x//(width//8)][0])) + self.assertEqual(U[y*grain.components[1].stride + x], expected[x//(width//8)][1]) + self.assertEqual(V[y*grain.components[2].stride + x], expected[x//(width//8)][2]) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + def test_colourbars75_s16_422_10bit(self): + """Testing that the ColourBars generator produces correct video frames + when the height is 4 lines and the width 240 pixels (to keep time taken + for testing under control""" + width = 240 + height = 4 + UUT = ColourBars(src_id, flow_id, + width, height, + intensity=0.75, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.S16_422_10BIT, + rate=Fraction(25, 1), + step=1) + + # 
Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.S16_422_10BIT) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = self.colourbars_expected_values_10bit + + for y in range(0, height): + for x in range(0, width//2): + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 0], int(0.75*expected[(2*x + 0)//(width//8)][0]) & 0xFF) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 1], int(0.75*expected[(2*x + 0)//(width//8)][0]) >> 8) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 2], int(0.75*expected[(2*x + 1)//(width//8)][0]) & 0xFF) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 3], int(0.75*expected[(2*x + 1)//(width//8)][0]) >> 8) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 0], expected[x//(width//16)][1] & 0xFF) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 1], expected[x//(width//16)][1] >> 8) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 0], expected[x//(width//16)][2] & 0xFF) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 1], expected[x//(width//16)][2] >> 8) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + +class TestMovingBarOverlay(TestCase): + def test_movingbar_colourbars100_u8_444(self): + """Testing that the ColourBars with MovingBarOverlay generators produces correct video frames + when the height is 4 lines and the width 
240 pixels (to keep time taken for testing under control""" + width = 240 + height = 4 + UUT = MovingBarOverlay(ColourBars(src_id, flow_id, + width, height, + intensity=1.0, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.U8_444, + rate=Fraction(25, 1), + step=1), + height=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.U8_444) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = TestColourBars.colourbars_expected_values_8bit + + fnum = grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator) + for y in range(0, height): + if (fnum % height) == y: + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], 16) + self.assertEqual(U[y*grain.components[1].stride + x], 128) + self.assertEqual(V[y*grain.components[2].stride + x], 128) + else: + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], expected[x//(width//8)][0]) + self.assertEqual(U[y*grain.components[1].stride + x], expected[x//(width//8)][1]) + self.assertEqual(V[y*grain.components[2].stride + x], expected[x//(width//8)][2]) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + def test_movingbar_colourbars75_u8_444(self): + """Testing that the ColourBars with MovingBarOverlay generator produces correct video frames + when the height is 4 lines and the width 
240 pixels (to keep time taken for testing under control""" + width = 240 + height = 4 + UUT = MovingBarOverlay(ColourBars(src_id, flow_id, + width, height, + intensity=0.75, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.U8_444, + rate=Fraction(25, 1), + step=1), + height=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.U8_444) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = TestColourBars.colourbars_expected_values_8bit + + fnum = grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator) + + for y in range(0, height): + if (fnum % height) == y: + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], 16) + self.assertEqual(U[y*grain.components[1].stride + x], 128) + self.assertEqual(V[y*grain.components[2].stride + x], 128) + else: + for x in range(0, width): + self.assertEqual(Y[y*grain.components[0].stride + x], int(0.75*expected[x//(width//8)][0])) + self.assertEqual(U[y*grain.components[1].stride + x], expected[x//(width//8)][1]) + self.assertEqual(V[y*grain.components[2].stride + x], expected[x//(width//8)][2]) + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + def test_movingbar_colourbars75_s16_422_10bit(self): + """Testing that the ColourBars with MovingBarOverlay generator produces correct video frames + when the height is 4 
lines and the width 240 pixels (to keep time taken for testing under control""" + width = 240 + height = 4 + UUT = MovingBarOverlay(ColourBars(src_id, flow_id, + width, height, + intensity=0.75, + origin_timestamp=origin_timestamp, + cog_frame_format=CogFrameFormat.S16_422_10BIT, + rate=Fraction(25, 1), + step=1), + height=1) + + # Extracts the first 10 grains from the generator + grains = [grain for _, grain in zip(range(10), UUT)] + + ts = origin_timestamp + for grain in grains: + self.assertEqual(grain.source_id, src_id) + self.assertEqual(grain.flow_id, flow_id) + self.assertEqual(grain.origin_timestamp, ts) + self.assertEqual(grain.sync_timestamp, ts) + self.assertEqual(grain.format, CogFrameFormat.S16_422_10BIT) + self.assertEqual(grain.rate, Fraction(25, 1)) + + Y = grain.data[grain.components[0].offset:grain.components[0].offset + grain.components[0].length] + U = grain.data[grain.components[1].offset:grain.components[1].offset + grain.components[1].length] + V = grain.data[grain.components[2].offset:grain.components[2].offset + grain.components[2].length] + + expected = TestColourBars.colourbars_expected_values_10bit + + fnum = grain.origin_timestamp.to_count(grain.rate.numerator, grain.rate.denominator) + + for y in range(0, height): + if (fnum % height) == y: + for x in range(0, width//2): + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 0], 64) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 1], 0) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 2], 64) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 3], 0) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 0], 0) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 1], 2) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 0], 0) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 1], 2) + else: + for x in range(0, width//2): + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 0], int(0.75*expected[(2*x + 0)//(width//8)][0]) & 0xFF) + 
self.assertEqual(Y[y*grain.components[0].stride + 4*x + 1], int(0.75*expected[(2*x + 0)//(width//8)][0]) >> 8) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 2], int(0.75*expected[(2*x + 1)//(width//8)][0]) & 0xFF) + self.assertEqual(Y[y*grain.components[0].stride + 4*x + 3], int(0.75*expected[(2*x + 1)//(width//8)][0]) >> 8) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 0], expected[x//(width//16)][1] & 0xFF) + self.assertEqual(U[y*grain.components[1].stride + 2*x + 1], expected[x//(width//16)][1] >> 8) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 0], expected[x//(width//16)][2] & 0xFF) + self.assertEqual(V[y*grain.components[2].stride + 2*x + 1], expected[x//(width//16)][2] >> 8) + + + ts = Timestamp.from_count(ts.to_count(25, 1) + 1, 25, 1) + + if __name__ == "__main__": import unittest diff --git a/tox.ini b/tox.ini index 10caf61..46f3906 100644 --- a/tox.ini +++ b/tox.ini @@ -4,14 +4,16 @@ # and then run "tox" from this directory. [tox] -envlist = py27, py3 +envlist = py27, py36 [testenv] commands = - coverage run --source=./mediagrains -m unittest discover -s tests - coverage annotate - coverage report + py27: python -m unittest discover -s tests -p test_*.py + py35: python -m unittest discover -s tests -p test_*.py + py36: python -m unittest discover -s tests -p test*_*.py + py37: python -m unittest discover -s tests -p test*_*.py deps = hypothesis >= 4.0.0 mock coverage + py36: aiofiles