diff --git a/.travis.yml b/.travis.yml index f07c42fe8..f547a3d9e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,7 +11,9 @@ install: script: - set -e - python --version - - python test/run_all.py + - python -m coverage run test/run_all.py + - python -m coverage xml -i --include */asammdf/* + - if [[ "$TRAVIS_PULL_REQUEST" = "false" ]]; then python-codacy-coverage -r coverage.xml; fi - python -m sphinx -nW -b html documentation documentation/_build/html after_success: diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md index 40faae1ad..b5deec961 100644 --- a/ISSUE_TEMPLATE.md +++ b/ISSUE_TEMPLATE.md @@ -1,4 +1,3 @@ -# Pyhton version Please write here the output of printing ``sys.version`` # Platform information @@ -8,8 +7,4 @@ Please write here the output of printing ``platform.platform()`` Please write here the output of printing ``asammdf.__version__`` # Description -<<<<<<< HEAD Please describe the issue here. -======= -Please describe the issue here. ->>>>>>> development diff --git a/README.md b/README.md index 8e63541ed..914ddc357 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,3 @@ -[![PyPI version](https://badge.fury.io/py/asammdf.svg)](https://badge.fury.io/py/asammdf) [![Documentation Status](http://readthedocs.org/projects/asammdf/badge/?version=master)](http://asammdf.readthedocs.io/en/master/?badge=stable) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a3da21da90ca43a5b72fc24b56880c99)](https://www.codacy.com/app/danielhrisca/asammdf?utm_source=github.com&utm_medium=referral&utm_content=danielhrisca/asammdf&utm_campaign=badger) - -[![Build Status](https://travis-ci.org/danielhrisca/asammdf.svg?branch=master)](https://travis-ci.org/danielhrisca/asammdf) *asammdf* is a fast parser/editor for ASAM (Associtation for Standardisation of Automation and Measuring Systems) MDF (Measurement Data Format) files. @@ -9,17 +6,24 @@ *asammdf* works on Python 2.7, and Python >= 3.4 (Travis CI tests done with Python 2.7 and Python >= 3.5) +# Status +! 
| master | development +--|--|-- +Travis CI | [![Build Status](https://travis-ci.org/danielhrisca/asammdf.svg?branch=master)](https://travis-ci.org/danielhrisca/asammdf) | [![Build Status](https://travis-ci.org/danielhrisca/asammdf.svg?branch=development)](https://travis-ci.org/danielhrisca/asammdf) +Codacy | [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a3da21da90ca43a5b72fc24b56880c99?branch=master)](https://www.codacy.com/app/danielhrisca/asammdf?utm_source=github.com&utm_medium=referral&utm_content=danielhrisca/asammdf&utm_campaign=badger) | [![Codacy Badge](https://api.codacy.com/project/badge/Grade/a3da21da90ca43a5b72fc24b56880c99?branch=development)](https://www.codacy.com/app/danielhrisca/asammdf?utm_source=github.com&utm_medium=referral&utm_content=danielhrisca/asammdf&utm_campaign=badger) +Coverage | [![Codacy Badge](https://api.codacy.com/project/badge/Coverage/a3da21da90ca43a5b72fc24b56880c99?branch=master)](https://www.codacy.com/app/danielhrisca/asammdf?utm_source=github.com&utm_medium=referral&utm_content=danielhrisca/asammdf&utm_campaign=Badge_Coverage) | [![Codacy Badge](https://api.codacy.com/project/badge/Coverage/a3da21da90ca43a5b72fc24b56880c99?branch=development)](https://www.codacy.com/app/danielhrisca/asammdf?utm_source=github.com&utm_medium=referral&utm_content=danielhrisca/asammdf&utm_campaign=Badge_Coverage) +ReadTheDocs | [![Documentation Status](http://readthedocs.org/projects/asammdf/badge/?version=master)](http://asammdf.readthedocs.io/en/master/?badge=stable) | [![Documentation Status](http://readthedocs.org/projects/asammdf/badge/?version=development)](http://asammdf.readthedocs.io/en/development/?badge=stable) +PyPI | [![PyPI version](https://badge.fury.io/py/asammdf.svg)](https://badge.fury.io/py/asammdf) | + -Project goals -============= +# Project goals The main goals for this library are: * to be faster than the other Python based mdf libraries * to have clean and easy to understand code base * to have minimal 3-rd party dependencies -Features -======== +# Features * create new mdf files from scratch * append new channels @@ -52,8 +56,7 @@ Features * usually a measurement will have channels from different sources at different rates * the *Signal* class facilitates operations with such channels -Major features not implemented (yet) -==================================== +# Major features not implemented (yet) * for version 3 * functionality related to sample reduction block (but the class is defined) @@ -69,8 +72,7 @@ Major features not implemented (yet) * channels with default X axis * channels with reference to attachment -Usage -===== +# Usage ```python from asammdf import MDF @@ -94,14 +96,12 @@ Usage ``` Check the *examples* folder for extended usage demo, or the documentation -http://asammdf.readthedocs.io/en/master/examples.html +http://asammdf.readthedocs.io/en/development/examples.html -Documentation -============= -http://asammdf.readthedocs.io/en/master +# Documentation +http://asammdf.readthedocs.io/en/development -Installation -============ +# Installation *asammdf* is available on * github: https://github.com/danielhrisca/asammdf/ @@ -111,8 +111,7 @@ Installation pip install asammdf ``` -Dependencies -============ +# Dependencies asammdf uses the following libraries * numpy : the heart that makes all tick @@ -127,8 +126,7 @@ optional dependencies needed for exports * xlsxwriter : for Excel export * scipy : for Matlab .mat export -Benchmarks -========== +# Benchmarks -http://asammdf.readthedocs.io/en/master/benchmarks.html 
+http://asammdf.readthedocs.io/en/development/benchmarks.html
diff --git a/README.rst b/README.rst
index 43727e80e..7082f7c90 100644
--- a/README.rst
+++ b/README.rst
@@ -90,11 +90,11 @@ Usage
 
 Check the *examples* folder for extended usage demo, or the documentation
-http://asammdf.readthedocs.io/en/master/examples.html
+http://asammdf.readthedocs.io/en/development/examples.html
 
 Documentation
 =============
-http://asammdf.readthedocs.io/en/master
+http://asammdf.readthedocs.io/en/development
 
 Installation
 ============
@@ -128,7 +128,7 @@ optional dependencies needed for exports
 Benchmarks
 ==========
 
-Graphical results can be seen here at http://asammdf.readthedocs.io/en/master/benchmarks.html
+Graphical results can be seen here at http://asammdf.readthedocs.io/en/development/benchmarks.html
 
 
 Python 3 x86
diff --git a/asammdf/__init__.py b/asammdf/__init__.py
index f7a3ddd2f..da82f7991 100644
--- a/asammdf/__init__.py
+++ b/asammdf/__init__.py
@@ -1,18 +1,18 @@
 # -*- coding: utf-8 -*-
 """ asammdf is a parser and editor for ASAM MDF files """
-from .mdf2 import MDF2
-from .mdf3 import MDF3
-from .mdf4 import MDF4
+
+from .mdf_v2 import MDF2
+from .mdf_v3 import MDF3
+from .mdf_v4 import MDF4
 from .mdf import MDF, SUPPORTED_VERSIONS
 from .signal import Signal
 from .version import __version__
-
 __all__ = [
     '__version__',
     'configure',
     'MDF',
-    'MDF2',
+    'MDF2',
     'MDF3',
     'MDF4',
     'Signal',
@@ -27,6 +27,10 @@ def configure(
         overwrite=None):
     """ configure asammdf parameters
 
+    Note
+    ----
+    this is not thread safe
+
    Parameters
    ----------
    integer_compacting : bool
@@ -46,7 +50,6 @@ def configure(
     """
 
     if integer_compacting is not None:
-        MDF2._compact_integers_on_append = bool(integer_compacting)
         MDF3._compact_integers_on_append = bool(integer_compacting)
         MDF4._compact_integers_on_append = bool(integer_compacting)
 
@@ -57,6 +60,5 @@ def configure(
         MDF4._split_data_blocks = bool(split_data_blocks)
 
     if overwrite is not None:
-        MDF2._overwrite = bool(overwrite)
         MDF3._overwrite = bool(overwrite)
         MDF4._overwrite = bool(overwrite)
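
A minimal usage sketch for the `configure` call patched above (the values passed are illustrative; per the new docstring note, the call mutates class-level flags and is not thread safe):

```python
import asammdf

# configure() flips class-level flags on MDF3/MDF4, so run it once,
# from a single thread, before creating or loading any MDF objects
asammdf.configure(
    integer_compacting=True,   # compact integer channels on append
    split_data_blocks=True,    # per the hunks above, only MDF4 honours this
    overwrite=False,           # keep existing files when saving
)

mdf = asammdf.MDF(version='4.10')
```
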
diff --git a/asammdf/mdf.py b/asammdf/mdf.py
index 67db77a25..cf5a015a0 100644
--- a/asammdf/mdf.py
+++ b/asammdf/mdf.py
@@ -3,7 +3,7 @@
 
 import csv
 import os
-from collections import defaultdict
+import sys
 from warnings import warn
 from functools import reduce
 from struct import unpack
@@ -11,22 +11,23 @@
 import numpy as np
 from pandas import DataFrame
 
-from .mdf2 import MDF2
-from .mdf3 import MDF3
-from .mdf4 import MDF4
-from .utils import MdfException
-from .v2blocks import Channel as ChannelV2
-from .v3blocks import TextBlock as TextBlockV3
-from .v3blocks import Channel as ChannelV3
-from .v4blocks import TextBlock as TextBlockV4
+from .mdf_v2 import MDF2
+from .mdf_v3 import MDF3
+from .mdf_v4 import MDF4
+from .utils import MdfException, get_text_v3, get_text_v4
+from .v2_v3_blocks import TextBlock as TextBlockV3
+from .v2_v3_blocks import Channel as ChannelV3
+from .v4_blocks import Channel as ChannelV4
+from .v4_blocks import TextBlock as TextBlockV4
+from .v4_blocks import ChannelArrayBlock
 
+PYVERSION = sys.version_info[0]
 
-MDF2_VERSIONS = ('2.00', '2.14')
+MDF2_VERSIONS = ('2.00', '2.10', '2.14')
 MDF3_VERSIONS = ('3.00', '3.10', '3.20', '3.30')
 MDF4_VERSIONS = ('4.00', '4.10', '4.11')
 SUPPORTED_VERSIONS = MDF2_VERSIONS + MDF3_VERSIONS + MDF4_VERSIONS
 
-
 __all__ = ['MDF', 'SUPPORTED_VERSIONS']
 
 
@@ -47,10 +48,13 @@ class MDF(object):
         * if *minimum* only minimal data is loaded into RAM
 
     version : string
-        mdf file version from ('2.00', '2.14', '3.00', '3.10', '3.20', '3.30',
-        '4.00', '4.10', '4.11'); default '4.10'
+        mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10', '3.20',
+        '3.30', '4.00', '4.10', '4.11'); default '4.10'
 
     """
+
+    _iter_channels = True
+
     def __init__(self, name=None, memory='full', version='4.10'):
         if name:
             if os.path.isfile(name):
@@ -75,18 +79,18 @@ def __init__(self, name=None, memory='full', version='4.10'):
             else:
                 raise MdfException('File "{}" does not exist'.format(name))
         else:
-            if version in MDF3_VERSIONS:
+            if version in MDF2_VERSIONS:
                 self._mdf = MDF3(
                     version=version,
                     memory=memory,
                 )
-            elif version in MDF4_VERSIONS:
-                self._mdf = MDF4(
+            elif version in MDF3_VERSIONS:
+                self._mdf = MDF3(
                     version=version,
                     memory=memory,
                 )
-            elif version in MDF2_VERSIONS:
-                self._mdf = MDF2(
+            elif version in MDF4_VERSIONS:
+                self._mdf = MDF4(
                     version=version,
                     memory=memory,
                 )
@@ -105,11 +109,8 @@ def _excluded_channels(self, index):
         group = self.groups[index]
 
         excluded_channels = set()
-        try:
-            master_index = self.masters_db[index]
-            excluded_channels.add(master_index)
-        except KeyError:
-            pass
+        master_index = self.masters_db.get(index, -1)
+        excluded_channels.add(master_index)
 
         channels = group['channels']
 
@@ -124,9 +125,10 @@ def _excluded_channels(self, index):
         for dependencies in group['channel_dependencies']:
             if dependencies is None:
                 continue
-            if all(dep['id'] == b'##CN' for dep in dependencies):
-                for ch in dependencies:
-                    excluded_channels.add(channels.index(ch))
+            if all(not isinstance(dep, ChannelArrayBlock)
+                   for dep in dependencies):
+                for channel in dependencies:
+                    excluded_channels.add(channels.index(channel))
             else:
                 for dep in dependencies:
                     for ch_nr, gp_nr in dep.referenced_channels:
@@ -135,14 +137,22 @@ def _excluded_channels(self, index):
 
         return excluded_channels
 
+    def __contains__(self, channel):
+        return channel in self.channels_db
+
+    def __iter__(self):
+        # the default is to yield from iter_channels
+        for signal in self.iter_channels():
+            yield signal
+
     def convert(self, to, memory='full'):
         """convert MDF to other versions
 
         Parameters
         ----------
         to : str
-            new mdf version from ('2.00', '2.14', '3.00', '3.10', '3.20',
-            '3.30', '4.00', '4.10', '4.11')
+            new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
+            '3.20', '3.30', '4.00', '4.10', '4.11')
         memory : str
             memory option; default `full`
 
@@ -153,21 +163,23 @@ def convert(self, to, memory='full'):
         """
 
         if to not in SUPPORTED_VERSIONS:
-            message = ('Unknown output mdf version "{}".'
-                       ' Available versions are {}')
+            message = (
+                'Unknown output mdf version "{}".'
+ ' Available versions are {}' + ) warn(message.format(to, SUPPORTED_VERSIONS)) - return + out = None else: out = MDF(version=to, memory=memory) # walk through all groups and get all channels - for i, gp in enumerate(self.groups): + for i, group in enumerate(self.groups): sigs = [] excluded_channels = self._excluded_channels(i) - data = self._load_group_data(gp) + data = self._load_group_data(group) - for j, _ in enumerate(gp['channels']): + for j, _ in enumerate(group['channels']): if j in excluded_channels: continue else: @@ -180,7 +192,8 @@ def convert(self, to, memory='full'): source_info.format(self.version, to), common_timebase=True, ) - return out + + return out def cut(self, start=None, stop=None, whence=0): """convert MDF to other versions @@ -219,7 +232,7 @@ def cut(self, start=None, stop=None, whence=0): index=master_index, samples_only=True, ) - if len(master): + if master.size: timestamps.append(master[0]) first_timestamp = np.amin(timestamps) if start is not None: @@ -231,13 +244,13 @@ def cut(self, start=None, stop=None, whence=0): del timestamps # walk through all groups and get all channels - for i, gp in enumerate(self.groups): + for i, group in enumerate(self.groups): sigs = [] excluded_channels = self._excluded_channels(i) - data = self._load_group_data(gp) + data = self._load_group_data(group) - for j, _ in enumerate(gp['channels']): + for j, _ in enumerate(group['channels']): if j in excluded_channels: continue sig = self.get( @@ -323,9 +336,9 @@ def export(self, fmt, filename=None): else: if not name.endswith('.hdf'): name = os.path.splitext(name)[0] + '.hdf' - with HDF5(name, 'w') as f: + with HDF5(name, 'w') as hdf: # header information - group = f.create_group(os.path.basename(name)) + group = hdf.create_group(os.path.basename(name)) if self.version in MDF2_VERSIONS + MDF3_VERSIONS: for item in header_items: @@ -337,7 +350,7 @@ def export(self, fmt, filename=None): # that will hold the name of the master channel for i, grp in enumerate(self.groups): group_name = r'/' + 'DataGroup_{}'.format(i + 1) - group = f.create_group(group_name) + group = hdf.create_group(group_name) master_index = self.masters_db.get(i, -1) @@ -363,9 +376,9 @@ def export(self, fmt, filename=None): return else: excel_name = os.path.splitext(name)[0] - nr = len(self.groups) + count = len(self.groups) for i, grp in enumerate(self.groups): - print('Exporting group {} of {}'.format(i+1, nr)) + print('Exporting group {} of {}'.format(i + 1, count)) data = self._load_group_data(grp) @@ -374,49 +387,47 @@ def export(self, fmt, filename=None): workbook = xlsxwriter.Workbook(wb_name) bold = workbook.add_format({'bold': True}) - ws = workbook.add_worksheet("Information") - if self.version in MDF2_VERSIONS + MDF3_VERSIONS: + sheet = workbook.add_worksheet(group_name) for j, item in enumerate(header_items): - - ws.write(j, 0, item.title(), bold) - ws.write(j, 1, self.header[item].decode('latin-1')) - - ws = workbook.add_worksheet(group_name) + sheet.write(j, 0, item.title(), bold) + sheet.write(j, 1, self.header[item].decode('latin-1')) # the sheet header has 3 rows # the channel name and unit 'YY [xx]' # the channel comment # the flag for data grup master channel - ws.write(0, 0, 'Channel', bold) - ws.write(1, 0, 'comment', bold) - ws.write(2, 0, 'is master', bold) + sheet.write(0, 0, 'Channel', bold) + sheet.write(1, 0, 'comment', bold) + sheet.write(2, 0, 'is master', bold) - master_index = self.masters_db[i] + master_index = self.masters_db.get(i, -1) for j in 
range(grp['channel_group']['cycles_nr']):
-                        ws.write(j+3, 0, str(j))
+                        sheet.write(j + 3, 0, str(j))
 
                 for j, _ in enumerate(grp['channels']):
                     sig = self.get(group=i, index=j, data=data)
 
                     col = j + 1
-                    sig_description = '{} [{}]'.format(sig.name,
-                                                       sig.unit)
+                    sig_description = '{} [{}]'.format(
+                        sig.name,
+                        sig.unit,
+                    )
                     comment = sig.comment if sig.comment else ''
-                    ws.write(0, col, sig_description)
-                    ws.write(1, col, comment)
+                    sheet.write(0, col, sig_description)
+                    sheet.write(1, col, comment)
 
                     if j == master_index:
-                        ws.write(2, col, 'x')
-                    ws.write_column(3, col, sig.samples.astype(str))
+                        sheet.write(2, col, 'x')
+                    sheet.write_column(3, col, sig.samples.astype(str))
 
                 workbook.close()
 
         elif fmt == 'csv':
             csv_name = os.path.splitext(name)[0]
-            nr = len(self.groups)
+            count = len(self.groups)
             for i, grp in enumerate(self.groups):
-                print('Exporting group {} of {}'.format(i+1, nr))
+                print('Exporting group {} of {}'.format(i + 1, count))
                 data = self._load_group_data(grp)
 
                 group_name = 'DataGroup_{}'.format(i + 1)
@@ -425,15 +436,19 @@
                     writer = csv.writer(csvfile, delimiter=';')
 
                     ch_nr = len(grp['channels'])
-                    channels = [self.get(group=i, index=j, data=data)
-                                for j in range(ch_nr)]
+                    channels = [
+                        self.get(group=i, index=j, data=data)
+                        for j in range(ch_nr)
+                    ]
 
-                    master_index = self.masters_db[i]
+                    master_index = self.masters_db.get(i, -1)
                     cycles = grp['channel_group']['cycles_nr']
 
                     names_row = ['Channel', ]
-                    names_row += ['{} [{}]'.format(ch.name, ch.unit)
-                                  for ch in channels]
+                    names_row += [
+                        '{} [{}]'.format(ch.name, ch.unit)
+                        for ch in channels
+                    ]
                     writer.writerow(names_row)
 
                     comment_row = ['comment', ]
@@ -441,8 +456,10 @@
                     writer.writerow(comment_row)
 
                     master_row = ['Is master', ]
-                    master_row += ['x' if j == master_index else ''
-                                   for j in range(ch_nr)]
+                    master_row += [
+                        'x' if j == master_index else ''
+                        for j in range(ch_nr)
+                    ]
                     writer.writerow(master_row)
 
                     vals = [np.array(range(cycles), dtype=np.uint32), ]
@@ -464,6 +481,7 @@
             channel = 'DataGroup_{}_{}'
 
             for i, grp in enumerate(self.groups):
+                master_index = self.masters_db.get(i, -1)
                 data = self._load_group_data(grp)
                 for j, _ in enumerate(grp['channels']):
                     sig = self.get(
@@ -483,6 +501,12 @@
                 long_field_names=True,
                 do_compression=True,
             )
+        else:
+            message = (
+                'Unsupported export type "{}". '
+                'Please select "csv", "excel", "hdf5" or "mat"'
+            )
+            warn(message.format(fmt))
 
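With the new `else` branch above, `export` warns on unknown formats instead of silently doing nothing. A sketch of the resulting behavior, assuming a file named `measurement.mdf` exists (the 'excel' and 'mat' branches additionally need the optional xlsxwriter and scipy dependencies):

```python
from asammdf import MDF

mdf = MDF('measurement.mdf')

# the four formats accepted by export(), as listed in the new warn message
for fmt in ('csv', 'excel', 'hdf5', 'mat'):
    mdf.export(fmt, 'measurement_out')

# any other value now emits the "Unsupported export type" warning
mdf.export('pdf', 'measurement_out')
```
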
     def filter(self, channels, memory=None):
         """ return new *MDF* object that contains only the channels listed in
         *channels* argument
 
         Parameters
         ----------
         channels : list
-            list of channel names to be filtered
+            list of items to be filtered; each item can be:
+
+                * a channel name string
+                * (channel_name, group index, channel index) list or tuple
+                * (channel name, group index) list or tuple
+                * (None, group index, channel index) list or tuple
+
         memory : str
             memory option for filtered mdf; default None in which case the
             original file's memory option is used
 
         Returns
         -------
         mdf : MDF
             new MDF file
 
+        Examples
+        --------
+        >>> from asammdf import MDF, Signal
+        >>> import numpy as np
+        >>> t = np.arange(5)
+        >>> s = np.ones(5)
+        >>> mdf = MDF()
+        >>> for i in range(4):
+        ...     sigs = [Signal(s*(i*10+j), t, name='SIG') for j in range(1,4)]
+        ...     mdf.append(sigs)
+        ...
+        >>> filtered = mdf.filter(['SIG', ('SIG', 3, 1), ['SIG', 2], (None, 1, 2)])
+        >>> for gp_nr, ch_nr in filtered.channels_db['SIG']:
+        ...     print(filtered.get(group=gp_nr, index=ch_nr))
+        ...
+        <Signal SIG:
+                samples=[ 1.  1.  1.  1.  1.]
+                timestamps=[0 1 2 3 4]
+                unit=""
+                info=None
+                comment="">
+        <Signal SIG:
+                samples=[ 31.  31.  31.  31.  31.]
+                timestamps=[0 1 2 3 4]
+                unit=""
+                info=None
+                comment="">
+        <Signal SIG:
+                samples=[ 21.  21.  21.  21.  21.]
+                timestamps=[0 1 2 3 4]
+                unit=""
+                info=None
+                comment="">
+        <Signal SIG:
+                samples=[ 12.  12.  12.  12.  12.]
+                timestamps=[0 1 2 3 4]
+                unit=""
+                info=None
+                comment="">
+
         """
 
         # group channels by group index
         gps = {}
-        excluded_channels = defaultdict(list)
-        for ch in channels:
-            if ch in self.channels_db:
-                for group, index in self.channels_db[ch]:
+
+        for item in channels:
+            if isinstance(item, (list, tuple)):
+                if len(item) not in (2, 3):
+                    raise MdfException(
+                        'The items used for filtering must be strings, '
+                        'or they must match the first 3 arguments of the get '
+                        'method'
+                    )
+                else:
+                    group, index = self._validate_channel_selection(*item)
                     if group not in gps:
                         gps[group] = set()
                     gps[group].add(index)
-                    if self.version in MDF2_VERSIONS + MDF3_VERSIONS:
-                        dep = group['channel_dependencies'][index]
-                        if dep:
+            else:
+                name = item
+                group, index = self._validate_channel_selection(name)
+                if group not in gps:
+                    gps[group] = set()
+                gps[group].add(index)
+
+        for group_index, indexes in gps.items():
+            grp = self.groups[group_index]
+            excluded_channels = set()
+            for index in indexes:
+                if self.version in MDF2_VERSIONS + MDF3_VERSIONS:
+                    dep = grp['channel_dependencies'][index]
+                    if dep:
+                        for ch_nr, gp_nr in dep.referenced_channels:
+                            if gp_nr == group:
+                                excluded_channels.add(ch_nr)
+                else:
+                    dependencies = grp['channel_dependencies'][index]
+                    if dependencies is None:
+                        continue
+                    if all(not isinstance(dep, ChannelArrayBlock)
+                           for dep in dependencies):
+                        channels = grp['channels']
+                        for channel in dependencies:
+                            excluded_channels.add(channels.index(channel))
+                    else:
+                        for dep in dependencies:
                             for ch_nr, gp_nr in dep.referenced_channels:
                                 if gp_nr == group:
-                                    excluded_channels[group].append(ch_nr)
-                    else:
-                        grp = self.groups[group]
-                        dependencies = grp['channel_dependencies'][index]
-                        if dependencies is None:
-                            continue
-                        if all(dep['id'] == b'##CN' for dep in dependencies):
-                            channels = grp['channels']
-                            for ch in dependencies:
-                                excluded_channels[group].append(channels.index(ch))
-                        else:
-                            for dep in dependencies:
-                                for ch_nr, gp_nr in dep.referenced_channels:
-                                    if gp_nr == group:
-                                        excluded_channels[group].append(ch_nr)
-            else:
-                message = ('MDF filter error: '
-                           'Channel "{}" not found, it will be ignored')
-                warn(message.format(ch))
-                continue
-
-        for group in excluded_channels:
-            excluded_indexes = excluded_channels[group]
-            if group in gps:
-                for index in excluded_indexes:
-                    if index in gps[group]:
-                        gps[group].remove(index)
-
-        if memory is not None:
-            if memory not in ('full', 'low', 'minimum'):
-                memory = self.memory
+                                    excluded_channels.add(ch_nr)
+
+            gps[group_index] = gps[group_index] - excluded_channels
+
+        if memory is None or memory not in ('full', 'low', 'minimum'):
+            memory = self.memory
 
         mdf = MDF(
             version=self.version,
@@ -584,7 +661,7 @@ def merge(files, outversion='4.10', memory='full'):
         Parameters
         ----------
         files : list | tuple
-            list of MDF file names
+            list of MDF file names or MDF instances
         outversion : str
             merged file version
         memory : str
@@ -603,11 +680,16 @@ def merge(files, outversion='4.10', memory='full'):
         if not files:
             raise MdfException('No files given for merge')
 
-        files = [MDF(file, memory) for file in files]
+        files = [
+            file if isinstance(file, MDF) else MDF(file, memory)
+            for file in files
+        ]
 
         if not len(set(len(file.groups) for file in files)) == 1:
-            message = ("Can't merge files: "
-                       "difference in number of data groups")
+            message = (
+                "Can't merge files: "
+                "difference in number of data groups"
+            )
             raise MdfException(message)
 
         merged = MDF(
@@ -618,8 +700,10 @@
             version=outversion,
             memory=memory,
         )
 
         for i, groups in enumerate(zip(*(file.groups for file in files))):
             channels_nr = set(len(group['channels']) for group in groups)
             if not len(channels_nr) == 1:
-                message = ("Can't merge files: "
-                           "different channel number for data groups {}")
+                message = (
+                    "Can't merge files: "
+                    "different channel number for data groups {}"
+                )
                 raise MdfException(message.format(i))
 
             signals = []
@@ -643,29 +727,20 @@
                     else:
                         stream = file._tempfile
 
-                    channel_texts = grp['texts']['channels'][j]
-                    if channel_texts and \
-                            'long_name_addr' in channel_texts:
-                        address = grp['texts']['channels'][j]['long_name_addr']
+                    channel = ChannelV3(
+                        address=grp['channels'][j],
+                        stream=stream,
+                    )
 
-                        block = TextBlockV3(
-                            address=address,
-                            stream=stream,
-                        )
-                        name = block['text']
+                    if channel.get('long_name_addr', 0):
+                        name = get_text_v3(channel['long_name_addr'], stream)
                     else:
-                        if file.version in MDF2_VERSIONS:
-                            channel = ChannelV2(
-                                address=grp['channels'][j],
-                                stream=stream,
-                            )
-                        else:
-                            channel = ChannelV3(
-                                address=grp['channels'][j],
-                                stream=stream,
-                            )
-                        name = channel['short_name']
-                    name = name.decode('latin-1').strip(' \r\n\t\0')
+                        name = (
+                            channel['short_name']
+                            .decode('latin-1')
+                            .strip(' \r\n\t\0')
+                            .split('\\')[0]
+                        )
                 else:
                     grp = file.groups[i]
                     if grp['data_location'] == 0:
@@ -673,48 +748,67 @@
                     else:
                         stream = file._tempfile
 
-                    address = grp['texts']['channels'][j]['name_addr']
-
-                    block = TextBlockV4(
-                        address=address,
+                    channel = ChannelV4(
+                        address=grp['channels'][j],
                         stream=stream,
                     )
-                    name = block['text']
-                    name = name.decode('utf-8').strip(' \r\n\t\0')
+                    name = get_text_v4(channel['name_addr'], stream)
                     name = name.split('\\')[0]
                 names.append(name)
                 names = set(names)
             else:
                 names = set(ch.name for ch in channels)
             if not len(names) == 1:
-                message = ("Can't merge files: "
-                           "different channel names for data group {}")
+                message = (
+                    "Can't merge files: "
+                    "different channel names for data group {}"
+                )
                 raise MdfException(message.format(i))
 
             if j in excluded_channels:
                 continue
 
-            sigs = [
+            signals_to_merge = [
                 file.get(group=i, index=j, data=data)
                 for file, data in zip(files, groups_data)
             ]
 
-            sig = sigs[0]
-            for s in sigs[1:]:
-                sig = sig.extend(s)
+            signal = signals_to_merge[0]
+            for merged_signal in signals_to_merge[1:]:
+                signal = signal.extend(merged_signal)
 
-            signals.append(sig)
+            signals.append(signal)
 
         if signals:
             merged.append(signals, common_timebase=True)
 
         return merged
 
-    def iter_to_pandas(self):
+    def iter_channels(self, skip_master=True):
+        """ generator that yields a `Signal` for each non-master channel
+
+        Parameters
+        ----------
+        skip_master : bool
+            do not yield master channels; default True
+
+        """
+        for i, group in enumerate(self.groups):
+            try:
+                master_index = self.masters_db[i]
+            except KeyError:
+                master_index = -1
+
+            for j, _ in enumerate(group['channels']):
+                if skip_master and j == master_index:
+                    continue
+                yield self.get(group=i, index=j)
+
+    def iter_groups(self):
         """ generator that yields channel groups as pandas DataFrames"""
 
-        for i, gp in enumerate(self.groups):
-            data = self._load_group_data(gp)
+        for i, group in enumerate(self.groups):
+            data = self._load_group_data(group)
             master_index = self.masters_db.get(i, None)
             if master_index is None:
                 pandas_dict = {}
@@ -725,7 +819,7 @@
                     data=data,
                 )
                 pandas_dict = {master.name: master.samples}
-            for j, _ in enumerate(gp['channels']):
+            for j, _ in enumerate(group['channels']):
                 if j == master_index:
                     continue
                 sig = self.get(
@@ -753,7 +847,7 @@ def resample(self, raster, memory=None):
 
         """
 
-        if memory is None:
+        if memory is None or memory not in ('full', 'low', 'minimum'):
            memory = self.memory
 
         mdf = MDF(
@@ -762,13 +856,13 @@
         )
 
         # walk through all groups and get all channels
-        for i, gp in enumerate(self.groups):
+        for i, group in enumerate(self.groups):
             sigs = []
             excluded_channels = self._excluded_channels(i)
 
-            data = self._load_group_data(gp)
+            data = self._load_group_data(group)
 
-            for j, _ in enumerate(gp['channels']):
+            for j, _ in enumerate(group['channels']):
                 if j in excluded_channels:
                     continue
                 sig = self.get(
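`resample` (like `filter` above) now sanitizes the `memory` argument, falling back to the source file's option for anything outside `('full', 'low', 'minimum')`. An illustrative sketch, again assuming a `measurement.mdf` file exists:

```python
from asammdf import MDF

mdf = MDF('measurement.mdf', memory='low')

# raster is the new fixed time step in seconds; the misspelled memory
# value falls back to mdf.memory ('low') instead of raising
resampled = mdf.resample(raster=0.1, memory='fulll')
```
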
@@ -796,7 +890,13 @@ def select(self, channels, dataframe=False):
 
         Parameters
         ----------
         channels : list
-            list of channel names to be filtered
+            list of items to be filtered; each item can be:
+
+                * a channel name string
+                * (channel_name, group index, channel index) list or tuple
+                * (channel name, group index) list or tuple
+                * (None, group index, channel index) list or tuple
+
         dataframe: bool
             return a pandas DataFrame instead of a list of Signals; in this
             case the signals will be interpolated using the union of all
@@ -805,34 +905,86 @@
         Returns
         -------
         signals : list
-            lsit of *Signal* objects based on the input channel list
+            list of *Signal* objects based on the input channel list
+
+        Examples
+        --------
+        >>> from asammdf import MDF, Signal
+        >>> import numpy as np
+        >>> t = np.arange(5)
+        >>> s = np.ones(5)
+        >>> mdf = MDF()
+        >>> for i in range(4):
+        ...     sigs = [Signal(s*(i*10+j), t, name='SIG') for j in range(1,4)]
+        ...     mdf.append(sigs)
+        ...
+        >>> # select SIG group 0 default index 1 default, SIG group 3 index 1, SIG group 2 index 1 default and channel index 2 from group 1
+        ...
+        >>> mdf.select(['SIG', ('SIG', 3, 1), ['SIG', 2], (None, 1, 2)])
+        [<Signal SIG:
+                samples=[ 1.  1.  1.  1.  1.]
+                timestamps=[0 1 2 3 4]
+                unit=""
+                info=None
+                comment="">
+        , <Signal SIG:
+                samples=[ 31.  31.  31.  31.  31.]
+                timestamps=[0 1 2 3 4]
+                unit=""
+                info=None
+                comment="">
+        , <Signal SIG:
+                samples=[ 21.  21.  21.  21.  21.]
+                timestamps=[0 1 2 3 4]
+                unit=""
+                info=None
+                comment="">
+        , <Signal SIG:
+                samples=[ 12.  12.  12.  12.  12.]
+                timestamps=[0 1 2 3 4]
+                unit=""
+                info=None
+                comment="">
+        ]
 
         """
 
         # group channels by group index
         gps = {}
-        for ch in channels:
-            if ch in self.channels_db:
-                for group, index in self.channels_db[ch]:
+
+        indexes = []
+
+        for item in channels:
+            if isinstance(item, (list, tuple)):
+                if len(item) not in (2, 3):
+                    raise MdfException(
+                        'The items used for filtering must be strings, '
+                        'or they must match the first 3 arguments of the get '
+                        'method'
+                    )
+                else:
+                    group, index = self._validate_channel_selection(*item)
+                    indexes.append((group, index))
                     if group not in gps:
-                        gps[group] = []
-                    gps[group].append(index)
+                        gps[group] = set()
+                    gps[group].add(index)
             else:
-                message = ('MDF filter error: '
-                           'Channel "{}" not found, it will be ignored')
-                warn(message.format(ch))
-                continue
+                name = item
+                group, index = self._validate_channel_selection(name)
+                indexes.append((group, index))
+                if group not in gps:
+                    gps[group] = set()
+                gps[group].add(index)
 
-        # append filtered channels to new MDF
         signals = {}
         for group in gps:
             grp = self.groups[group]
             data = self._load_group_data(grp)
             for index in gps[group]:
                 signal = self.get(group=group, index=index, data=data)
-                signals[signal.name] = signal
+                signals[(group, index)] = signal
 
-        signals = [signals[channel] for channel in channels]
+        signals = [signals[pair] for pair in indexes]
 
         if dataframe:
             times = [s.timestamps for s in signals]
@@ -848,5 +1000,33 @@
 
         return signals
 
+    def whereis(self, channel):
+        """ get occurrences of channel name in the file
+
+        Parameters
+        ----------
+        channel : str
+            channel name string
+
+        Returns
+        -------
+        occurrences : tuple
+
+
+        Examples
+        --------
+        >>> mdf = MDF(file_name)
+        >>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file
+        ((1, 2), (2, 4))
+        >>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file
+        ()
+
+        """
+        if channel in self:
+            return tuple(self.channels_db[channel])
+        else:
+            return tuple()
+
 
 if __name__ == '__main__':
     pass
diff --git a/asammdf/mdf2.py b/asammdf/mdf2.py
deleted file mode 100644
index 5a2e42223..000000000
--- a/asammdf/mdf2.py
+++ /dev/null
@@ -1,3493 +0,0 @@
-# -*- coding: utf-8 -*-
-""" ASAM MDF version 3 file format module """
-
-from __future__ import print_function, division
-import sys
-
-
-import os
-import time
-import warnings
-
-from collections import defaultdict
-from functools import reduce, partial
-from tempfile import TemporaryFile
-from itertools import product
-from copy import deepcopy
-
-from numpy import (
-    interp,
-    linspace,
-    dtype,
-    array_equal,
-    column_stack,
-    array,
-    searchsorted,
-    log,
-    exp,
-    clip,
-    union1d,
-    float64,
-    flip,
-    unpackbits,
-    packbits,
-    roll,
-    zeros,
-    uint8,
-    arange,
-)
-from numpy.core.records import fromstring, fromarrays
-from numpy.core.defchararray import encode
-from numexpr import evaluate
-
-from .utils import (
-    MdfException,
-    get_fmt,
-    pair,
-    fmt_to_datatype,
-    get_unique_name,
-    get_min_max,
-    fix_dtype_fields,
-)
-from .signal import Signal
-from .version import __version__
-from . 
import v2constants as v2c -from .v2blocks import ( - Channel, - ChannelConversion, - ChannelDependency, - ChannelExtension, - ChannelGroup, - DataBlock, - DataGroup, - FileIdentificationBlock, - HeaderBlock, - TextBlock, - TriggerBlock, -) - - -get_fmt = partial(get_fmt, version=2) -fmt_to_datatype = partial(fmt_to_datatype, version=2) - -PYVERSION = sys.version_info[0] -if PYVERSION == 2: - from .utils import bytes - - -__all__ = ['MDF2', ] - - -class MDF2(object): - """If the *name* exist it will be loaded otherwise an empty file will be - created that can be later saved to disk - - Parameters - ---------- - name : string - mdf file name - memory : str - memory optimization option; default `full` - - * if *full* the data group binary data block will be memorised in RAM - * if *low* the channel data is read from disk on request, and the - metadata is memorised into RAM - * if *minimum* only minimal data is memorised into RAM - - version : string - mdf file version ('2.00' or '2.14'); default '2.14' - - Attributes - ---------- - name : string - mdf file name - groups : list - list of data groups - header : OrderedDict - mdf file header - file_history : TextBlock - file history text block; can be None - memory : bool - load measured data option - version : str - mdf version - channels_db : dict - used for fast channel access by name; for each name key the value is a - list of (group index, channel index) tuples - masters_db : dict - used for fast master channel access; for each group index key the value - is the master channel index - - """ - - _compact_integers_on_append = False - _overwrite = False - - def __init__(self, name=None, memory=2, version='2.14'): - self.groups = [] - self.header = None - self.identification = None - self.file_history = None - self.name = name - self.memory = memory - self.channels_db = {} - self.masters_db = {} - - self._master_channel_cache = {} - - # used for appending to MDF created with memory=False - self._tempfile = TemporaryFile() - self._tempfile.write(b'\0') - self._file = None - - if name: - self._file = open(self.name, 'rb') - self._read() - else: - self.identification = FileIdentificationBlock(version=version) - self.version = version - self.header = HeaderBlock(version=self.version) - - def _load_group_data(self, group): - """ get group's data block bytes""" - - if self.memory == 'full': - data = group['data_block']['data'] - else: - # could be an appended group - # for now appended groups keep the measured data in the memory. - # the plan is to use a temp file for appended groups, to keep the - # memory usage low. 
- if group['data_location'] == v2c.LOCATION_ORIGINAL_FILE: - # this is a group from the source file - # so fetch the measured data from it - stream = self._file - # go to the first data block of the current data group - dat_addr = group['data_group']['data_block_addr'] - - if group['sorted']: - read_size = group['size'] - data = DataBlock( - stream=stream, - address=dat_addr, size=read_size, - ) - data = data['data'] - - else: - read_size = group['size'] - record_id = group['channel_group']['record_id'] - cg_size = group['record_size'] - if group['data_group']['record_id_nr'] <= 2: - record_id_nr = group['data_group']['record_id_nr'] - else: - record_id_nr = 0 - cg_data = [] - - data = DataBlock( - stream=stream, - address=dat_addr, size=read_size, - ) - data = data['data'] - - i = 0 - size = len(data) - while i < size: - rec_id = data[i] - # skip record id - i += 1 - rec_size = cg_size[rec_id] - if rec_id == record_id: - rec_data = data[i: i+rec_size] - cg_data.append(rec_data) - # consider the second record ID if it exists - if record_id_nr == 2: - i += rec_size + 1 - else: - i += rec_size - data = b''.join(cg_data) - elif group['data_location'] == v2c.LOCATION_TEMPORARY_FILE: - read_size = group['size'] - dat_addr = group['data_group']['data_block_addr'] - if dat_addr: - self._tempfile.seek(dat_addr, v2c.SEEK_START) - data = self._tempfile.read(read_size) - else: - data = b'' - - return data - - def _prepare_record(self, group): - """ compute record dtype and parents dict for this group - - Parameters - ---------- - group : dict - MDF group dict - - Returns - ------- - parents, dtypes : dict, numpy.dtype - mapping of channels to records fields, records fiels dtype - - """ - - memory = self.memory - stream = self._file - grp = group - record_size = grp['channel_group']['samples_byte_nr'] << 3 - next_byte_aligned_position = 0 - types = [] - current_parent = "" - parent_start_offset = 0 - parents = {} - group_channels = set() - - if memory != 'minimum': - channels = grp['channels'] - else: - channels = [ - Channel(address=ch_addr, stream=stream) - for ch_addr in grp['channels'] - ] - - # the channels are first sorted ascending (see __lt__ method of Channel - # class): a channel with lower start offset is smaller, when two - # channels havethe same start offset the one with higer bit size is - # considered smaller. The reason is that when the numpy record is built - # and there are overlapping channels, the parent fields mustbe bigger - # (bit size) than the embedded channels. For each channel the parent - # dict will have a (parent name, bit offset) pair: the channel value is - # computed using the values from the parent field, and the bit offset, - # which is the channel's bit offset within the parent bytes. - # This means all parents will have themselves as parent, and bit offset - # of 0. Gaps in the records are also considered. Non standard integers - # size is adjusted to the first higher standard integer size (eq. 
uint - # of 28bits will be adjusted to 32bits) - - sortedchannels = sorted(enumerate(channels), key=lambda i: i[1]) - for original_index, new_ch in sortedchannels: - # skip channels with channel dependencies from the numpy record - if new_ch['ch_depend_addr']: - continue - - start_offset = new_ch['start_offset'] - bit_offset = start_offset % 8 - data_type = new_ch['data_type'] - bit_count = new_ch['bit_count'] - if memory == 'minimum': - channel_texts = grp['texts']['channels'][original_index] - if channel_texts and 'long_name_addr' in channel_texts: - address = grp['texts']['channels'][original_index]['long_name_addr'] - - block = TextBlock( - address=address, - stream=stream, - ) - name = block['text'].decode('latin-1').strip(' \r\n\t\0') - else: - name = new_ch['short_name'].decode('latin-1').strip(' \r\n\t\0') - name = name.split('\\')[0] - else: - name = new_ch.name - - # handle multiple occurance of same channel name - name = get_unique_name(group_channels, name) - group_channels.add(name) - - if start_offset >= next_byte_aligned_position: - parent_start_offset = (start_offset // 8) * 8 - - # check if there are byte gaps in the record - gap = (parent_start_offset - next_byte_aligned_position) // 8 - if gap: - types.append(('', 'a{}'.format(gap))) - - # adjust size to 1, 2, 4 or 8 bytes for nonstandard integers - size = bit_offset + bit_count - if data_type == v2c.DATA_TYPE_STRING: - next_byte_aligned_position = parent_start_offset + size - size = size // 8 - if next_byte_aligned_position <= record_size: - dtype_pair = (name, get_fmt(data_type, size)) - types.append(dtype_pair) - parents[original_index] = name, bit_offset - - elif data_type == v2c.DATA_TYPE_BYTEARRAY: - size = size // 8 - next_byte_aligned_position = parent_start_offset + size - if next_byte_aligned_position <= record_size: - dtype_pair = (name, 'u1', (size, 1)) - types.append(dtype_pair) - parents[original_index] = name, bit_offset - - else: - if size > 32: - next_byte_aligned_position = parent_start_offset + 64 - size = 8 - elif size > 16: - next_byte_aligned_position = parent_start_offset + 32 - size = 4 - elif size > 8: - next_byte_aligned_position = parent_start_offset + 16 - size = 2 - else: - next_byte_aligned_position = parent_start_offset + 8 - size = 1 - - if next_byte_aligned_position <= record_size: - dtype_pair = (name, get_fmt(data_type, size)) - types.append(dtype_pair) - parents[original_index] = name, bit_offset - - current_parent = name - else: - max_overlapping = next_byte_aligned_position - start_offset - if max_overlapping >= bit_count: - parents[original_index] = ( - current_parent, - start_offset - parent_start_offset, - ) - if next_byte_aligned_position > record_size: - break - - gap = (record_size - next_byte_aligned_position) >> 3 - if gap: - dtype_pair = ('', 'a{}'.format(gap)) - types.append(dtype_pair) - - if PYVERSION == 2: - types = fix_dtype_fields(types) - - return parents, dtype(types) - - def _get_not_byte_aligned_data(self, data, group, ch_nr): - - big_endian_types = ( - v2c.DATA_TYPE_UNSIGNED_MOTOROLA, - v2c.DATA_TYPE_FLOAT_MOTOROLA, - v2c.DATA_TYPE_DOUBLE_MOTOROLA, - v2c.DATA_TYPE_SIGNED_MOTOROLA, - ) - - record_size = group['channel_group']['samples_byte_nr'] - - if self.memory != 'minimum': - channel = group['channels'][ch_nr] - else: - channel = Channel( - address=group['channels'][ch_nr], - stream=self._file, - ) - - bit_offset = channel['start_offset'] % 8 - byte_offset = channel['start_offset'] // 8 - bit_count = channel['bit_count'] - - byte_count = bit_offset + bit_count - 
if byte_count % 8: - byte_count = (byte_count >> 3) + 1 - else: - byte_count >>= 3 - - types = [ - ('', 'a{}'.format(byte_offset)), - ('vals', '({},)u1'.format(byte_count)), - ('', 'a{}'.format(record_size - byte_count - byte_offset)), - ] - - vals = fromstring(data, dtype=dtype(types)) - - vals = vals['vals'] - - if channel['data_type'] not in big_endian_types: - vals = flip(vals, 1) - - vals = unpackbits(vals) - vals = roll(vals, bit_offset) - vals = vals.reshape((len(vals) // 8, 8)) - vals = packbits(vals) - vals = vals.reshape((len(vals) // byte_count, byte_count)) - - if bit_count < 64: - mask = 2 ** bit_count - 1 - masks = [] - while mask: - masks.append(mask & 0xFF) - mask >>= 8 - for i in range(byte_count - len(masks)): - masks.append(0) - - masks = masks[::-1] - for i, mask in enumerate(masks): - vals[:, i] &= mask - - if channel['data_type'] not in big_endian_types: - vals = flip(vals, 1) - - if bit_count <= 8: - size = 1 - elif bit_count <= 16: - size = 2 - elif bit_count <= 32: - size = 4 - elif bit_count <= 64: - size = 8 - else: - size = bit_count // 8 - - if size > byte_count: - extra_bytes = size - byte_count - extra = zeros((len(vals), extra_bytes), dtype=uint8) - - types = [ - ('vals', vals.dtype, vals.shape[1:]), - ('', extra.dtype, extra.shape[1:]), - ] - vals = fromarrays([vals, extra], dtype=dtype(types)) - vals = vals.tostring() - - fmt = get_fmt(channel['data_type'], size) - if size <= byte_count: - types = [ - ('vals', fmt), - ('', 'a{}'.format(byte_count - size)), - ] - else: - types = [('vals', fmt), ] - - vals = fromstring(vals, dtype=dtype(types)) - - return vals['vals'] - - def _validate_channel_selection(self, name=None, group=None, index=None): - """Gets channel comment. - - Channel can be specified in two ways: - - * using the first positional argument *name* - - * if there are multiple occurrences for this channel then the - *group* and *index* arguments can be used to select a specific - group. - * if there are multiple occurrences for this channel and either the - *group* or *index* arguments is None then a warning is issued - - * using the group number (keyword argument *group*) and the channel - number (keyword argument *index*). Use *info* method for group and - channel numbers - - - If the *raster* keyword argument is not *None* the output is - interpolated accordingly. - - Parameters - ---------- - name : string - name of channel - group : int - 0-based group index - index : int - 0-based channel index - - Returns - ------- - group_index, channel_index : (int, int) - selected channel's group and channel index - - """ - if name is None: - if group is None or index is None: - message = ('Invalid arguments for channel selection: ' - 'must give "name" or, "group" and "index"') - raise MdfException(message) - else: - gp_nr, ch_nr = group, index - if gp_nr > len(self.groups) - 1: - raise MdfException('Group index out of range') - if index > len(self.groups[gp_nr]['channels']) - 1: - raise MdfException('Channel index out of range') - else: - name = name.split('\\')[0] - if name not in self.channels_db: - raise MdfException('Channel "{}" not found'.format(name)) - else: - if group is None: - gp_nr, ch_nr = self.channels_db[name][0] - if len(self.channels_db[name]) > 1: - message = ('Multiple occurances for channel "{}". ' - 'Using first occurance from data group {}. 
' - 'Provide both "group" and "index" arguments' - ' to select another data group') - message = message.format(name, gp_nr) - warnings.warn(message) - else: - group_valid = False - for gp_nr, ch_nr in self.channels_db[name]: - if gp_nr == group: - group_valid = True - if index is None: - break - elif index == ch_nr: - break - else: - if group_valid: - gp_nr, ch_nr = self.channels_db[name][group] - message = ('You have selected channel index "{}"' - 'of group "{}" for channel "{}", but ' - 'this channel index is invalid. Using ' - 'first occurance of "{}" in this group' - ' at index "{}"') - message = message.format( - index, - group, - name, - name, - ch_nr, - ) - else: - gp_nr, ch_nr = self.channels_db[name][0] - message = ('You have selected group "{}" for ' - 'channel "{}", but this channel was not' - ' found in this group, or this group ' - 'index does not exist. Using first ' - 'occurance of "{}" from group "{}"') - message = message.format(group, name, name, gp_nr) - warnings.warn(message) - return gp_nr, ch_nr - - def _read(self): - stream = self._file - memory = self.memory - - # performance optimization - read = stream.read - seek = stream.seek - - dg_cntr = 0 - seek(0, v2c.SEEK_START) - - self.identification = FileIdentificationBlock( - stream=stream, - ) - self.header = HeaderBlock(stream=stream) - - self.version = self.identification['version_str']\ - .decode('latin-1')\ - .strip(' \n\t\0') - - self.file_history = TextBlock( - address=self.header['comment_addr'], - stream=stream, - ) - - # this will hold mapping from channel address to Channel object - # needed for linking dependency blocks to referenced channels after - # the file is loaded - ch_map = {} - - # go to first date group - dg_addr = self.header['first_dg_addr'] - # read each data group sequentially - while dg_addr: - gp = DataGroup(address=dg_addr, stream=stream) - record_id_nr = gp['record_id_nr'] - cg_nr = gp['cg_nr'] - cg_addr = gp['first_cg_addr'] - data_addr = gp['data_block_addr'] - - # read trigger information if available - trigger_addr = gp['trigger_addr'] - if trigger_addr: - trigger = TriggerBlock(address=trigger_addr, - stream=stream) - if trigger['text_addr']: - trigger_text = TextBlock( - address=trigger['text_addr'], - stream=stream, - ) - else: - trigger_text = None - else: - trigger = None - trigger_text = None - - new_groups = [] - for i in range(cg_nr): - - new_groups.append({}) - grp = new_groups[-1] - grp['channels'] = [] - grp['channel_conversions'] = [] - grp['channel_extensions'] = [] - grp['data_block'] = None - grp['texts'] = { - 'channels': [], - 'conversion_tab': [], - 'channel_group': [], - } - grp['trigger'] = [trigger, trigger_text] - grp['channel_dependencies'] = [] - - if record_id_nr: - grp['sorted'] = False - else: - grp['sorted'] = True - - kargs = {'first_cg_addr': cg_addr, - 'data_block_addr': data_addr} - if self.version in ('3.20', '3.30'): - kargs['block_len'] = v2c.DG32_BLOCK_SIZE - else: - kargs['block_len'] = v2c.DG31_BLOCK_SIZE - - grp['data_group'] = DataGroup(**kargs) - - # read each channel group sequentially - grp['channel_group'] = ChannelGroup( - address=cg_addr, - stream=stream, - ) - - # read name and comment for current channel group - cg_texts = {} - grp['texts']['channel_group'].append(cg_texts) - - address = grp['channel_group']['comment_addr'] - if address: - if memory != 'minimum': - block = TextBlock( - address=address, - stream=stream, - ) - cg_texts['comment_addr'] = block - else: - cg_texts['comment_addr'] = address - - # go to first channel of the 
current channel group - ch_addr = grp['channel_group']['first_ch_addr'] - ch_cntr = 0 - grp_chs = grp['channels'] - grp_conv = grp['channel_conversions'] - grp_ch_texts = grp['texts']['channels'] - - while ch_addr: - # read channel block and create channel object - new_ch = Channel( - address=ch_addr, - stream=stream, - ) - - # check if it has channel dependencies - if new_ch['ch_depend_addr']: - dep = ChannelDependency( - address=new_ch['ch_depend_addr'], - stream=stream, - ) - grp['channel_dependencies'].append(dep) - else: - grp['channel_dependencies'].append(None) - - # update channel map - ch_map[ch_addr] = (ch_cntr, dg_cntr) - - # read conversion block - address = new_ch['conversion_addr'] - if address: - new_conv = ChannelConversion( - address=address, - stream=stream, - ) - if memory != 'minimum': - grp_conv.append(new_conv) - else: - grp_conv.append(address) - else: - new_conv = None - if memory != 'minimum': - grp_conv.append(None) - else: - grp_conv.append(0) - - vtab_texts = {} - if new_conv: - conv_type = new_conv['conversion_type'] - else: - conv_type = 0 - if conv_type == v2c.CONVERSION_TYPE_VTABR: - for idx in range(new_conv['ref_param_nr']): - address = new_conv['text_{}'.format(idx)] - if address: - if memory != 'minimum': - block = TextBlock( - address=address, - stream=stream, - ) - vtab_texts['text_{}'.format(idx)] = block - else: - vtab_texts['text_{}'.format(idx)] = address - - if vtab_texts: - grp['texts']['conversion_tab'].append(vtab_texts) - else: - grp['texts']['conversion_tab'].append(None) - - address = new_ch['source_depend_addr'] - if memory != 'minimum': - if address: - block = ChannelExtension( - address=address, - stream=stream, - ) - grp['channel_extensions'].append(block) - else: - grp['channel_extensions'].append(None) - else: - grp['channel_extensions'].append(address) - - # read text fields for channel - ch_texts = {} - for key in ( - 'long_name_addr', - 'comment_addr'): - address = new_ch.get(key, 0) - if address: - if memory != 'minimum': - ch_texts[key] = TextBlock( - address=address, - stream=stream, - ) - else: - ch_texts[key] = address - - if ch_texts: - grp_ch_texts.append(ch_texts) - else: - grp_ch_texts.append(None) - - # update channel object name and block_size attributes - if new_ch.get('long_name_addr', 0): - if memory != 'minimum': - name = ch_texts['long_name_addr']['text'] - else: - block = TextBlock( - address=ch_texts['long_name_addr'], - stream=stream, - ) - name = block['text'] - else: - name = new_ch['short_name'] - name = name.decode('latin-1').strip(' \n\t\0') - name = name.split('\\')[0] - new_ch.name = name - - if name in self.channels_db: - self.channels_db[name].append((dg_cntr, ch_cntr)) - else: - self.channels_db[name] = [] - self.channels_db[name].append((dg_cntr, ch_cntr)) - - if new_ch['channel_type'] == v2c.CHANNEL_TYPE_MASTER: - self.masters_db[dg_cntr] = ch_cntr - # go to next channel of the current channel group - - ch_cntr += 1 - if memory != 'minimum': - grp_chs.append(new_ch) - else: - grp_chs.append(ch_addr) - ch_addr = new_ch['next_ch_addr'] - - cg_addr = grp['channel_group']['next_cg_addr'] - dg_cntr += 1 - - # store channel groups record sizes dict and data block size in - # each new group data belong to the initial unsorted group, and - # add the key 'sorted' with the value False to use a flag; - # this is used later if memory=False - - cg_size = {} - total_size = 0 - - for grp in new_groups: - record_id = grp['channel_group']['record_id'] - cycles_nr = grp['channel_group']['cycles_nr'] - record_size = 
grp['channel_group']['samples_byte_nr'] - - cg_size[record_id] = record_size - - record_size += record_id_nr - total_size += record_size * cycles_nr - - grp['record_size'] = cg_size - grp['size'] = total_size - - if memory == 'full': - # read data block of the current data group - dat_addr = gp['data_block_addr'] - if dat_addr: - seek(dat_addr, v2c.SEEK_START) - data = read(total_size) - else: - data = b'' - if record_id_nr == 0: - grp = new_groups[0] - grp['data_location'] = v2c.LOCATION_MEMORY - grp['data_block'] = DataBlock(data=data) - - else: - # agregate data for each record ID in the cg_data dict - cg_data = defaultdict(list) - i = 0 - size = len(data) - while i < size: - rec_id = data[i] - # skip record id - i += 1 - rec_size = cg_size[rec_id] - rec_data = data[i: i+rec_size] - cg_data[rec_id].append(rec_data) - # possibly skip 2nd record id - if record_id_nr == 2: - i += rec_size + 1 - else: - i += rec_size - for grp in new_groups: - grp['data_location'] = v2c.LOCATION_MEMORY - data = cg_data[grp['channel_group']['record_id']] - data = b''.join(data) - grp['channel_group']['record_id'] = 1 - grp['data_block'] = DataBlock(data=data) - else: - for grp in new_groups: - grp['data_location'] = v2c.LOCATION_ORIGINAL_FILE - - self.groups.extend(new_groups) - - # go to next data group - dg_addr = gp['next_dg_addr'] - - # finally update the channel depency references - for grp in self.groups: - for dep in grp['channel_dependencies']: - if dep: - for i in range(dep['sd_nr']): - ref_channel_addr = dep['ch_{}'.format(i)] - channel = ch_map[ref_channel_addr] - dep.referenced_channels.append(channel) - - if self.memory == 'full': - self.close() - - def add_trigger(self, - group, - timestamp, - pre_time=0, - post_time=0, - comment=''): - """ add trigger to data group - - Parameters - ---------- - group : int - group index - timestamp : float - trigger time - pre_time : float - trigger pre time; default 0 - post_time : float - trigger post time; default 0 - comment : str - trigger comment - - """ - gp = self.groups[group] - trigger, trigger_text = gp['trigger'] - if trigger: - nr = trigger['trigger_event_nr'] - trigger['trigger_event_nr'] += 1 - trigger['block_len'] += 24 - trigger['trigger_{}_time'.format(nr)] = timestamp - trigger['trigger_{}_pretime'.format(nr)] = pre_time - trigger['trigger_{}_posttime'.format(nr)] = post_time - if trigger_text is None and comment: - trigger_text = TextBlock(text=comment) - gp['trigger'][1] = trigger_text - else: - trigger = TriggerBlock( - trigger_event_nr=1, - trigger_0_time=timestamp, - trigger_0_pretime=pre_time, - trigger_0_posttime=post_time, - ) - if comment: - trigger_text = TextBlock(text=comment) - else: - trigger_text = None - - gp['trigger'] = [trigger, trigger_text] - - def append(self, - signals, - acquisition_info='Python', - common_timebase=False): - """ - Appends a new data group. 
- - For channel dependencies type Signals, the *samples* attribute must be a - numpy.recarray - - Parameters - ---------- - signals : list - list on *Signal* objects - acquisition_info : str - acquisition information; default 'Python' - common_timebase : bool - flag to hint that the signals have the same timebase - - - Examples - -------- - >>> # case 1 conversion type None - >>> s1 = np.array([1, 2, 3, 4, 5]) - >>> s2 = np.array([-1, -2, -3, -4, -5]) - >>> s3 = np.array([0.1, 0.04, 0.09, 0.16, 0.25]) - >>> t = np.array([0.001, 0.002, 0.003, 0.004, 0.005]) - >>> names = ['Positive', 'Negative', 'Float'] - >>> units = ['+', '-', '.f'] - >>> info = {} - >>> s1 = Signal(samples=s1, timstamps=t, unit='+', name='Positive') - >>> s2 = Signal(samples=s2, timstamps=t, unit='-', name='Negative') - >>> s3 = Signal(samples=s3, timstamps=t, unit='flts', name='Floats') - >>> mdf = MDF2('new.mdf') - >>> mdf.append([s1, s2, s3], 'created by asammdf v1.1.0') - >>> # case 2: VTAB conversions from channels inside another file - >>> mdf1 = MDF2('in.mdf') - >>> ch1 = mdf1.get("Channel1_VTAB") - >>> ch2 = mdf1.get("Channel2_VTABR") - >>> sigs = [ch1, ch2] - >>> mdf2 = MDF2('out.mdf') - >>> mdf2.append(sigs, 'created by asammdf v1.1.0') - - """ - if not signals: - error = '"append" requires a non-empty list of Signal objects' - raise MdfException(error) - - # check if the signals have a common timebase - # if not interpolate the signals using the union of all timbases - t_ = signals[0].timestamps - if not common_timebase: - for s in signals[1:]: - if not array_equal(s.timestamps, t_): - different = True - break - else: - different = False - - if different: - times = [s.timestamps for s in signals] - t = reduce(union1d, times).flatten().astype(float64) - signals = [s.interp(t) for s in signals] - times = None - else: - t = t_ - else: - t = t_ - - memory = self.memory - file = self._tempfile - write = file.write - tell = file.tell - - # split regular from composed signals. Composed signals have recarray - # samples or multimendional ndarray. - # The regular signals will be first added to the group. - # The composed signals will be saved along side the fields, which will - # be saved as new signals. 
- simple_signals = [ - sig for sig in signals - if len(sig.samples.shape) <= 1 and - sig.samples.dtype.names is None - ] - composed_signals = [ - sig for sig in signals - if len(sig.samples.shape) > 1 or - sig.samples.dtype.names - ] - - # mdf version 4 structure channels and CANopen types will be saved to - # new channel groups - new_groups_signals = [ - sig for sig in composed_signals - if sig.samples.dtype.names and - sig.samples.dtype.names[0] != sig.name - ] - composed_signals = [ - sig for sig in composed_signals - if not sig.samples.dtype.names or - sig.samples.dtype.names[0] == sig.name - ] - - if simple_signals or composed_signals: - dg_cntr = len(self.groups) - - gp = {} - gp['channels'] = gp_channels = [] - gp['channel_conversions'] = gp_conv = [] - gp['channel_extensions'] = gp_source = [] - gp['channel_dependencies'] = gp_dep = [] - gp['texts'] = gp_texts = { - 'channels': [], - 'conversion_tab': [], - 'channel_group': [], - } - self.groups.append(gp) - - cycles_nr = len(t) - fields = [] - types = [] - parents = {} - ch_cntr = 0 - offset = 0 - field_names = set() - - # setup all blocks related to the time master channel - - # time channel texts - for _, item in gp_texts.items(): - item.append(None) - - gp_texts['channel_group'][-1] = {} - block = TextBlock(text=acquisition_info) - if memory != 'minimum': - gp_texts['channel_group'][-1]['comment_addr'] = block - else: - address = tell() - gp_texts['channel_group'][-1]['comment_addr'] = address - write(bytes(block)) - - # conversion for time channel - kargs = { - 'conversion_type': v2c.CONVERSION_TYPE_NONE, - 'unit': b's', - 'min_phy_value': t[0] if cycles_nr else 0, - 'max_phy_value': t[-1] if cycles_nr else 0, - } - block = ChannelConversion(**kargs) - if memory != 'minimum': - gp_conv.append(block) - else: - address = tell() - gp_conv.append(address) - write(bytes(block)) - - # source for time - kargs = { - 'module_nr': 0, - 'module_address': 0, - 'type': v2c.SOURCE_ECU, - 'description': b'Channel inserted by Python Script', - } - block = ChannelExtension(**kargs) - if memory != 'minimum': - gp_source.append(block) - else: - address = tell() - gp_source.append(address) - write(bytes(block)) - - # time channel - t_type, t_size = fmt_to_datatype(t.dtype) - kargs = { - 'short_name': b't', - 'channel_type': v2c.CHANNEL_TYPE_MASTER, - 'data_type': t_type, - 'start_offset': 0, - 'min_raw_value': t[0] if cycles_nr else 0, - 'max_raw_value': t[-1] if cycles_nr else 0, - 'bit_count': t_size, - } - if self.version == '2.00': - kargs['block_len'] = v2c.CN20_BLOCK_SIZE - else: - kargs['block_len'] = v2c.CN21_BLOCK_SIZE - channel = Channel(**kargs) - channel.name = name = 't' - if memory != 'minimum': - gp_channels.append(channel) - else: - address = tell() - gp_channels.append(address) - write(bytes(channel)) - - if name not in self.channels_db: - self.channels_db[name] = [] - self.channels_db[name].append((dg_cntr, ch_cntr)) - self.masters_db[dg_cntr] = 0 - # data group record parents - parents[ch_cntr] = name, 0 - - # time channel doesn't have channel dependencies - gp_dep.append(None) - - fields.append(t) - types.append((name, t.dtype)) - field_names.add(name) - - offset += t_size - ch_cntr += 1 - - if self._compact_integers_on_append: - compacted_signals = [ - {'signal': sig} - for sig in simple_signals - if sig.samples.dtype.kind in 'ui' - ] - - max_itemsize = 1 - dtype_ = dtype(uint8) - - for signal in compacted_signals: - itemsize = signal['signal'].samples.dtype.itemsize - - min_, max_ = get_min_max(signal['signal'].samples) - 
signal['min'], signal['max'] = min_, max_
-                minimum_bitlength = (itemsize // 2) * 8 + 1
-                bit_length = max(
-                    int(max_).bit_length(),
-                    int(min_).bit_length(),
-                )
-
-                signal['bit_count'] = max(minimum_bitlength, bit_length)
-
-                if itemsize > max_itemsize:
-                    dtype_ = dtype('<u{}'.format(itemsize))
-                    max_itemsize = itemsize
-
-            compacted_signals.sort(key=lambda x: x['bit_count'])
-            simple_signals = [
-                sig for sig in simple_signals
-                if sig.samples.dtype.kind not in 'ui'
-            ]
-            dtype_size = dtype_.itemsize * 8
-
-        else:
-            compacted_signals = []
-
-        while compacted_signals:
-            cluster = []
-
-            tail = compacted_signals.pop()
-            size = tail['bit_count']
-            cluster.append(tail)
-
-            while size < dtype_size and compacted_signals:
-                head = compacted_signals[0]
-                head_size = head['bit_count']
-                if head_size + size > dtype_size:
-                    break
-                else:
-                    cluster.append(compacted_signals.pop(0))
-                    size += head_size
-
-            bit_offset = 0
-            field_name = get_unique_name(field_names, 'COMPACT')
-            types.append((field_name, dtype_))
-            field_names.add(field_name)
-
-            values = zeros(cycles_nr, dtype=dtype_)
-
-            for signal_d in cluster:
-
-                signal = signal_d['signal']
-                bit_count = signal_d['bit_count']
-                min_val = signal_d['min']
-                max_val = signal_d['max']
-
-                name = signal.name
-                for _, item in gp['texts'].items():
-                    item.append(None)
-
-                texts = {}
-                if len(name) >= 32 and self.version == '2.14':
-                    block = TextBlock(text=name)
-                    if memory != 'minimum':
-                        texts['long_name_addr'] = block
-                    else:
-                        address = tell()
-                        texts['long_name_addr'] = address
-                        write(bytes(block))
-                if texts:
-                    gp_texts['channels'][-1] = texts
-
-                texts = {}
-                info = signal.info
-                if info and 'raw' in info and not info['raw'].dtype.kind == 'S':
-                    kargs = {}
-                    kargs['conversion_type'] = v2c.CONVERSION_TYPE_VTAB
-                    raw = info['raw']
-                    phys = info['phys']
-                    for i, (r_, p_) in enumerate(zip(raw, phys)):
-                        kargs['text_{}'.format(i)] = p_[:31] + b'\0'
-                        kargs['param_val_{}'.format(i)] = r_
-                    kargs['ref_param_nr'] = len(raw)
-                    kargs['unit'] = signal.unit.encode('latin-1')
-                elif info and 'lower' in info:
-                    kargs = {}
-                    kargs['conversion_type'] = v2c.CONVERSION_TYPE_VTABR
-                    lower = info['lower']
-                    upper = info['upper']
-                    texts = info['phys']
-                    kargs['unit'] = signal.unit.encode('latin-1')
-                    kargs['ref_param_nr'] = len(upper)
-
-                    for i, vals in enumerate(zip(upper, lower, texts)):
-                        u_, l_, t_ = vals
-                        kargs['lower_{}'.format(i)] = l_
-                        kargs['upper_{}'.format(i)] = u_
-                        kargs['text_{}'.format(i)] = 0
-
-                        key = 'text_{}'.format(i)
-                        block = TextBlock(text=t_)
-                        if memory != 'minimum':
-                            texts[key] = block
-                        else:
-                            address = tell()
-                            texts[key] = address
-                            write(bytes(block))
-
-                else:
-                    if min_val <= max_val:
-                        min_phy_value = min_val
-                        max_phy_value = max_val
-                    else:
-                        min_phy_value = 0
-                        max_phy_value = 0
-                    kargs = {
-                        'conversion_type': v2c.CONVERSION_TYPE_NONE,
-                        'unit': signal.unit.encode('latin-1'),
-                        'min_phy_value': min_phy_value,
-                        'max_phy_value': max_phy_value,
-                    }
-
-                if texts:
-                    gp_texts['conversion_tab'][-1] = texts
-
-                block = ChannelConversion(**kargs)
-                if memory != 'minimum':
-                    gp_conv.append(block)
-                else:
-                    address = tell()
-                    gp_conv.append(address)
-                    write(bytes(block))
-
-                # source for channel
-                kargs = {
-                    'module_nr': 0,
-                    'module_address': 0,
-                    'type': v2c.SOURCE_ECU,
-                    'description': b'Channel inserted by Python Script',
-                }
-                block = ChannelExtension(**kargs)
-                if memory != 'minimum':
-                    gp_source.append(block)
-                else:
-                    address = tell()
-                    gp_source.append(address)
-                    write(bytes(block))
-
-                # compute additional byte offset for large records size
-                current_offset = offset + bit_offset
-                if current_offset > v2c.MAX_UINT16:
-                    additional_byte_offset = \
-                        (current_offset - v2c.MAX_UINT16) >> 3
-                    start_bit_offset = \
-                        current_offset - additional_byte_offset << 3
-                else:
-                    start_bit_offset = current_offset
-                    additional_byte_offset = 0
-
-                if signal.samples.dtype.kind == 'u':
-                    data_type = v2c.DATA_TYPE_UNSIGNED_INTEL
-                else:
-                    data_type = v2c.DATA_TYPE_SIGNED_INTEL
-
-                texts = {}
-                if len(name) >= 32 and self.version == '2.14':
-                    short_name = (name[:31] + '\0').encode('latin-1')
-                    if memory != 'minimum':
-                        texts['long_name_addr'] = TextBlock(text=name)
-                    else:
-                        address = tell()
-                        texts['long_name_addr'] = address
-                        block = TextBlock(text=name)
-                        write(bytes(block))
-                else:
-                    short_name = name.encode('latin-1')
-
-                if texts:
-                    gp_texts['channels'][-1] = texts
-
-                kargs = {
-                    'short_name': short_name,
-                    'channel_type': v2c.CHANNEL_TYPE_VALUE,
-                    'data_type': data_type,
-                    'min_raw_value': min_val if min_val <= max_val else 0,
-                    'max_raw_value': max_val if min_val <= max_val else 0,
-                    'start_offset': start_bit_offset,
-                    'bit_count': bit_count,
-                    'aditional_byte_offset': additional_byte_offset,
-                }
-                if self.version == '2.00':
-                    kargs['block_len'] = v2c.CN20_BLOCK_SIZE
-                else:
-                    kargs['block_len'] = v2c.CN21_BLOCK_SIZE
-                comment = signal.comment
-                if comment:
-                    comment = comment.encode('latin-1')
-                    if len(comment) >= 128:
-                        comment = comment[:127] + b'\0'
-                    kargs['description'] = comment
-
-                channel = Channel(**kargs)
-                channel.name = name
-                if memory != 'minimum':
-                    gp_channels.append(channel)
-                else:
-                    address = tell()
-                    gp_channels.append(address)
-                    write(bytes(channel))
-
-                if name not in self.channels_db:
-                    self.channels_db[name] = []
-                self.channels_db[name].append((dg_cntr, ch_cntr))
-
-                # update the parents as well
-                parents[ch_cntr] = field_name, bit_offset
-
-                # simple channels don't have channel dependencies
-                gp_dep.append(None)
-
-                values += signal.samples.astype(dtype_) << bit_offset
-                bit_offset += bit_count
-
-                ch_cntr += 1
-
-            offset += dtype_.itemsize * 8
-            fields.append(values)
-
-        # first add the signals in the simple signal list
-        for signal in simple_signals:
-            # channels texts
-            name = signal.name
-            for _, item in gp['texts'].items():
-                item.append(None)
-
-            texts = {}
-            if len(name) >= 32 and self.version == '2.14':
-                block = TextBlock(text=name)
-                if memory != 'minimum':
-                    texts['long_name_addr'] = block
-                else:
-                    address = tell()
-                    texts['long_name_addr'] = address
-                    write(bytes(block))
-            if texts:
-                gp_texts['channels'][-1] = texts
-
-            # conversions for channel
-            min_val, max_val = get_min_max(signal.samples)
-
-            texts = {}
-            info = signal.info
-            if info and 'raw' in info and not info['raw'].dtype.kind == 'S':
-                kargs = {}
-                kargs['conversion_type'] = v2c.CONVERSION_TYPE_VTAB
-                raw = info['raw']
-                phys = info['phys']
-                for i, (r_, p_) in enumerate(zip(raw, phys)):
-                    kargs['text_{}'.format(i)] = p_[:31] + b'\0'
-                    kargs['param_val_{}'.format(i)] = r_
-                kargs['ref_param_nr'] = len(raw)
-                kargs['unit'] = signal.unit.encode('latin-1')
-            elif info and 'lower' in info:
-                kargs = {}
-                kargs['conversion_type'] = v2c.CONVERSION_TYPE_VTABR
-                lower = info['lower']
-                upper = info['upper']
-                texts_ = info['phys']
-                kargs['unit'] = signal.unit.encode('latin-1')
-                kargs['ref_param_nr'] = len(upper)
-
-                for i, (u_, l_, t_) in enumerate(zip(upper, lower, texts_)):
-                    kargs['lower_{}'.format(i)] = l_
-                    kargs['upper_{}'.format(i)] = u_
-                    kargs['text_{}'.format(i)] = 0
-
-                    key = 'text_{}'.format(i)
-                    block = TextBlock(text=t_)
-                    if memory != 'minimum':
-                        texts[key] = block
-                    else:
-                        address = tell()
-                        texts[key] = address
-                        write(bytes(block))
-
-            else:
-                if min_val <= max_val:
-                    min_phy_value = min_val
-                    max_phy_value = max_val
-                else:
-                    min_phy_value = 0
-                    max_phy_value = 0
-                kargs = {
-                    'conversion_type': v2c.CONVERSION_TYPE_NONE,
-                    'unit': signal.unit.encode('latin-1'),
-                    'min_phy_value': min_phy_value,
-                    'max_phy_value': max_phy_value,
-                }
-
-            if texts:
-                gp_texts['conversion_tab'][-1] = texts
-
-            block 
= ChannelConversion(**kargs) - if memory != 'minimum': - gp_conv.append(block) - else: - address = tell() - gp_conv.append(address) - write(bytes(block)) - - # source for channel - kargs = { - 'module_nr': 0, - 'module_address': 0, - 'type': v2c.SOURCE_ECU, - 'description': b'Channel inserted by Python Script', - } - block = ChannelExtension(**kargs) - if memory != 'minimum': - gp_source.append(block) - else: - address = tell() - gp_source.append(address) - write(bytes(block)) - - # compute additional byte offset for large records size - if offset > v2c.MAX_UINT16: - additional_byte_offset = (offset - v2c.MAX_UINT16) >> 3 - start_bit_offset = offset - additional_byte_offset << 3 - else: - start_bit_offset = offset - additional_byte_offset = 0 - s_type, s_size = fmt_to_datatype(signal.samples.dtype) - - if len(name) >= 32: - short_name = (name[:31] + '\0').encode('latin-1') - else: - short_name = name.encode('latin-1') - kargs = { - 'short_name': short_name, - 'channel_type': v2c.CHANNEL_TYPE_VALUE, - 'data_type': s_type, - 'min_raw_value': min_val if min_val <= max_val else 0, - 'max_raw_value': max_val if min_val <= max_val else 0, - 'start_offset': start_bit_offset, - 'bit_count': s_size, - 'aditional_byte_offset': additional_byte_offset, - } - if self.version == '2.00': - kargs['block_len'] = v2c.CN20_BLOCK_SIZE - else: - kargs['block_len'] = v2c.CN21_BLOCK_SIZE - comment = signal.comment - if comment: - if len(comment) >= 128: - comment = (comment[:127] + '\0').encode('latin-1') - else: - comment = comment.encode('latin-1') - kargs['description'] = comment - - channel = Channel(**kargs) - channel.name = name - if memory != 'minimum': - gp_channels.append(channel) - else: - address = tell() - gp_channels.append(address) - write(bytes(channel)) - offset += s_size - - if name not in self.channels_db: - self.channels_db[name] = [] - self.channels_db[name].append((dg_cntr, ch_cntr)) - - # update the parents as well - field_name = get_unique_name(field_names, name) - parents[ch_cntr] = field_name, 0 - - fields.append(signal.samples) - types.append((field_name, signal.samples.dtype)) - field_names.add(field_name) - - ch_cntr += 1 - - # simple channels don't have channel dependencies - gp_dep.append(None) - - # second, add the composed signals - for signal in composed_signals: - names = signal.samples.dtype.names - name = signal.name - - component_names = [] - component_samples = [] - if names: - samples = signal.samples[names[0]] - else: - samples = signal.samples - - shape = samples.shape[1:] - dims = [list(range(size)) for size in shape] - - for indexes in product(*dims): - subarray = samples - for idx in indexes: - subarray = subarray[:, idx] - component_samples.append(subarray) - - indexes = ''.join('[{}]'.format(idx) for idx in indexes) - component_name = '{}{}'.format(name, indexes) - component_names.append(component_name) - - # add channel dependency block for composed parent channel - sd_nr = len(component_samples) - kargs = {'sd_nr': sd_nr} - for i, dim in enumerate(shape[::-1]): - kargs['dim_{}'.format(i)] = dim - parent_dep = ChannelDependency(**kargs) - gp_dep.append(parent_dep) - - if names: - new_samples = [signal.samples[fld] for fld in names[1:]] - component_samples.extend(new_samples) - component_names.extend(names[1:]) - - # add composed parent signal texts - for _, item in gp['texts'].items(): - item.append(None) - - texts = {} - if len(name) >= 32 and self.version == '2.14': - block = TextBlock(text=name) - if memory != 'minimum': - texts['long_name_addr'] = block - else: 
-                    address = tell()
-                    texts['long_name_addr'] = address
-                    write(bytes(block))
-            if texts:
-                gp_texts['channels'][-1] = texts
-
-            # composed parent has no conversion
-            if memory != 'minimum':
-                gp_conv.append(None)
-            else:
-                gp_conv.append(0)
-
-            # add parent and components sources
-            kargs = {
-                'module_nr': 0,
-                'module_address': 0,
-                'type': v2c.SOURCE_ECU,
-                'description': b'Channel inserted by Python Script',
-            }
-            block = ChannelExtension(**kargs)
-            if memory != 'minimum':
-                gp_source.append(block)
-            else:
-                address = tell()
-                gp_source.append(address)
-                write(bytes(block))
-
-            min_val, max_val = get_min_max(samples)
-
-            s_type, s_size = fmt_to_datatype(samples.dtype)
-            # compute additional byte offset for large records size
-            if offset > v2c.MAX_UINT16:
-                additional_byte_offset = (offset - v2c.MAX_UINT16) >> 3
-                start_bit_offset = offset - additional_byte_offset << 3
-            else:
-                start_bit_offset = offset
-                additional_byte_offset = 0
-
-            if len(name) >= 32:
-                short_name = (name[:31] + '\0').encode('latin-1')
-            else:
-                short_name = name.encode('latin-1')
-            kargs = {
-                'short_name': short_name,
-                'channel_type': v2c.CHANNEL_TYPE_VALUE,
-                'data_type': s_type,
-                'min_raw_value': min_val if min_val <= max_val else 0,
-                'max_raw_value': max_val if min_val <= max_val else 0,
-                'start_offset': start_bit_offset,
-                'bit_count': s_size,
-                'aditional_byte_offset': additional_byte_offset,
-            }
-            if self.version == '2.00':
-                kargs['block_len'] = v2c.CN20_BLOCK_SIZE
-            else:
-                kargs['block_len'] = v2c.CN21_BLOCK_SIZE
-            comment = signal.comment
-            if comment:
-                if len(comment) >= 128:
-                    comment = (comment[:127] + '\0').encode('latin-1')
-                else:
-                    comment = comment.encode('latin-1')
-                kargs['description'] = comment
-
-            channel = Channel(**kargs)
-            channel.name = name
-            if memory != 'minimum':
-                gp_channels.append(channel)
-            else:
-                address = tell()
-                gp_channels.append(address)
-                write(bytes(channel))
-
-            if name not in self.channels_db:
-                self.channels_db[name] = []
-            self.channels_db[name].append((dg_cntr, ch_cntr))
-
-            ch_cntr += 1
-
-            for i, (name, samples) in enumerate(zip(component_names,
-                                                    component_samples)):
-                for _, item in gp['texts'].items():
-                    item.append(None)
-
-                texts = {}
-                if len(name) >= 32 and self.version == '2.14':
-                    block = TextBlock(text=name)
-                    if memory != 'minimum':
-                        texts['long_name_addr'] = block
-                    else:
-                        address = tell()
-                        texts['long_name_addr'] = address
-                        write(bytes(block))
-                if texts:
-                    gp_texts['channels'][-1] = texts
-
-                min_val, max_val = get_min_max(samples)
-                s_type, s_size = fmt_to_datatype(samples.dtype)
-                shape = samples.shape[1:]
-
-                kargs = {
-                    'module_nr': 0,
-                    'module_address': 0,
-                    'type': v2c.SOURCE_ECU,
-                    'description': b'Channel inserted by Python Script',
-                }
-                block = ChannelExtension(**kargs)
-                if memory != 'minimum':
-                    gp_source.append(block)
-                else:
-                    address = tell()
-                    gp_source.append(address)
-                    write(bytes(block))
-
-                if memory != 'minimum':
-                    gp_conv.append(None)
-                else:
-                    gp_conv.append(0)
-
-                # compute additional byte offset for large records size
-                if offset > v2c.MAX_UINT16:
-                    additional_byte_offset = (offset - v2c.MAX_UINT16) >> 3
-                    start_bit_offset = offset - additional_byte_offset << 3
-                else:
-                    start_bit_offset = offset
-                    additional_byte_offset = 0
-
-                if len(name) >= 32:
-                    short_name = (name[:31] + '\0').encode('latin-1')
-                else:
-                    short_name = name.encode('latin-1')
-                kargs = {
-                    'short_name': short_name,
-                    'channel_type': v2c.CHANNEL_TYPE_VALUE,
-                    'data_type': s_type,
-                    'min_raw_value': min_val if min_val <= 
max_val else 0, - 'max_raw_value': max_val if min_val <= max_val else 0, - 'start_offset': start_bit_offset, - 'bit_count': s_size, - 'aditional_byte_offset': additional_byte_offset, - } - if self.version == '2.00': - kargs['block_len'] = v2c.CN20_BLOCK_SIZE - else: - kargs['block_len'] = v2c.CN21_BLOCK_SIZE - - channel = Channel(**kargs) - channel.name = name - if memory != 'minimum': - gp_channels.append(channel) - else: - address = tell() - gp_channels.append(address) - write(bytes(channel)) - size = s_size - for dim in shape: - size *= dim - offset += size - - if name not in self.channels_db: - self.channels_db[name] = [] - self.channels_db[name].append((dg_cntr, ch_cntr)) - - # update the parents as well - field_name = get_unique_name(field_names, name) - parents[ch_cntr] = field_name, 0 - - fields.append(samples) - types.append((field_name, samples.dtype, shape)) - field_names.add(field_name) - - gp_dep.append(None) - - if i < sd_nr: - dep_pair = ch_cntr, dg_cntr - parent_dep.referenced_channels.append(dep_pair) - else: - description = '{} - axis {}'.format(signal.name, name) - description = description.encode('latin-1') - channel['description'] = description - - ch_cntr += 1 - - # channel group - kargs = { - 'cycles_nr': cycles_nr, - 'samples_byte_nr': offset >> 3, - } - gp['channel_group'] = ChannelGroup(**kargs) - gp['channel_group']['ch_nr'] = ch_cntr - gp['size'] = cycles_nr * (offset >> 3) - - # data group - if self.version in ('3.20', '3.30'): - block_len = v2c.DG32_BLOCK_SIZE - else: - block_len = v2c.DG31_BLOCK_SIZE - gp['data_group'] = DataGroup(block_len=block_len) - - # data block - if PYVERSION == 2: - types = fix_dtype_fields(types) - types = dtype(types) - - gp['types'] = types - gp['parents'] = parents - gp['sorted'] = True - - samples = fromarrays(fields, dtype=types) - block = samples.tostring() - - if memory == 'full': - gp['data_location'] = v2c.LOCATION_MEMORY - kargs = {'data': block} - gp['data_block'] = DataBlock(**kargs) - else: - gp['data_location'] = v2c.LOCATION_TEMPORARY_FILE - if cycles_nr: - data_address = tell() - gp['data_group']['data_block_addr'] = data_address - self._tempfile.write(block) - else: - gp['data_group']['data_block_addr'] = 0 - - # data group trigger - gp['trigger'] = [None, None] - - for signal in new_groups_signals: - dg_cntr = len(self.groups) - gp = {} - gp['channels'] = gp_channels = [] - gp['channel_conversions'] = gp_conv = [] - gp['channel_extensions'] = gp_source = [] - gp['channel_dependencies'] = gp_dep = [] - gp['texts'] = gp_texts = { - 'channels': [], - 'conversion_tab': [], - 'channel_group': [], - } - self.groups.append(gp) - - cycles_nr = len(t) - fields = [] - types = [] - parents = {} - ch_cntr = 0 - offset = 0 - field_names = set() - - # setup all blocks related to the time master channel - - # time channel texts - for _, item in gp_texts.items(): - item.append(None) - - # conversion for time channel - kargs = { - 'conversion_type': v2c.CONVERSION_TYPE_NONE, - 'unit': b's', - 'min_phy_value': t[0] if cycles_nr else 0, - 'max_phy_value': t[-1] if cycles_nr else 0, - } - block = ChannelConversion(**kargs) - if memory == 'minimum': - address = tell() - write(bytes(block)) - gp_conv.append(address) - else: - gp_conv.append(ChannelConversion(**kargs)) - - # source for time - kargs = { - 'module_nr': 0, - 'module_address': 0, - 'type': v2c.SOURCE_ECU, - 'description': b'Channel inserted by Python Script', - } - block = ChannelExtension(**kargs) - if memory != 'minimum': - gp_source.append(block) - else: - address = tell() 
- gp_source.append(address) - write(bytes(block)) - - # time channel - t_type, t_size = fmt_to_datatype(t.dtype) - kargs = { - 'short_name': b't', - 'channel_type': v2c.CHANNEL_TYPE_MASTER, - 'data_type': t_type, - 'start_offset': 0, - 'min_raw_value': t[0] if cycles_nr else 0, - 'max_raw_value': t[-1] if cycles_nr else 0, - 'bit_count': t_size, - } - if self.version == '2.00': - kargs['block_len'] = v2c.CN20_BLOCK_SIZE - else: - kargs['block_len'] = v2c.CN21_BLOCK_SIZE - channel = Channel(**kargs) - channel.name = name = 't' - if memory != 'minimum': - gp_channels.append(channel) - else: - address = tell() - gp_channels.append(address) - write(bytes(channel)) - - if name not in self.channels_db: - self.channels_db[name] = [] - self.channels_db[name].append((dg_cntr, ch_cntr)) - self.masters_db[dg_cntr] = 0 - # data group record parents - parents[ch_cntr] = name, 0 - - # time channel doesn't have channel dependencies - gp_dep.append(None) - - fields.append(t) - types.append((name, t.dtype)) - field_names.add(name) - - offset += t_size - ch_cntr += 1 - - names = signal.samples.dtype.names - if names == ('ms', - 'days'): - block = TextBlock(text='From mdf v4 CANopen Time channel') - if memory == 'minimum': - address = tell() - write(bytes(block)) - gp_texts['channel_group'][-1] = {'comment_addr': address} - else: - gp_texts['channel_group'][-1] = {'comment_addr': block} - elif names == ('ms', - 'min', - 'hour', - 'day', - 'month', - 'year', - 'summer_time', - 'day_of_week'): - block = TextBlock(text='From mdf v4 CANopen Date channel') - if memory == 'minimum': - address = tell() - write(bytes(block)) - gp_texts['channel_group'][-1] = {'comment_addr': address} - else: - gp_texts['channel_group'][-1] = {'comment_addr': block} - else: - text = 'From mdf v4 structure channel composition' - block = TextBlock(text=text) - if memory == 'minimum': - address = tell() - write(bytes(block)) - gp_texts['channel_group'][-1] = {'comment_addr': address} - else: - gp_texts['channel_group'][-1] = {'comment_addr': block} - - for name in names: - - samples = signal.samples[name] - - # channels texts - for _, item in gp['texts'].items(): - item.append(None) - - texts = {} - if len(name) >= 32 and self.version == '2.14': - block = TextBlock(text=name) - texts['long_name_addr'] = block - if texts: - gp_texts['channels'][-1] = texts - - # conversions for channel - min_val, max_val = get_min_max(samples) - - kargs = { - 'conversion_type': v2c.CONVERSION_TYPE_NONE, - 'unit': signal.unit.encode('latin-1'), - 'min_phy_value': min_val if min_val <= max_val else 0, - 'max_phy_value': max_val if min_val <= max_val else 0, - } - block = ChannelConversion(**kargs) - if memory != 'minimum': - gp_conv.append(block) - else: - address = tell() - gp_conv.append(address) - write(bytes(block)) - - # source for channel - kargs = { - 'module_nr': 0, - 'module_address': 0, - 'type': v2c.SOURCE_ECU, - 'description': b'Channel inserted by Python Script', - } - block = ChannelExtension(**kargs) - if memory != 'minimum': - gp_source.append(block) - else: - address = tell() - gp_source.append(address) - write(bytes(block)) - - # compute additional byte offset for large records size - if offset > v2c.MAX_UINT16: - additional_byte_offset = (offset - v2c.MAX_UINT16) >> 3 - start_bit_offset = offset - additional_byte_offset << 3 - else: - start_bit_offset = offset - additional_byte_offset = 0 - s_type, s_size = fmt_to_datatype(samples.dtype) - if len(name) >= 32: - short_name = (name[:31] + '\0').encode('latin-1') - else: - short_name = 
name.encode('latin-1')
-                kargs = {
-                    'short_name': short_name,
-                    'channel_type': v2c.CHANNEL_TYPE_VALUE,
-                    'data_type': s_type,
-                    'min_raw_value': min_val if min_val <= max_val else 0,
-                    'max_raw_value': max_val if min_val <= max_val else 0,
-                    'start_offset': start_bit_offset,
-                    'bit_count': s_size,
-                    'aditional_byte_offset': additional_byte_offset,
-                }
-                if self.version == '2.00':
-                    kargs['block_len'] = v2c.CN20_BLOCK_SIZE
-                else:
-                    kargs['block_len'] = v2c.CN21_BLOCK_SIZE
-
-                channel = Channel(**kargs)
-                channel.name = name
-                if memory != 'minimum':
-                    gp_channels.append(channel)
-                else:
-                    address = tell()
-                    gp_channels.append(address)
-                    write(bytes(channel))
-                offset += s_size
-
-                if name not in self.channels_db:
-                    self.channels_db[name] = []
-                self.channels_db[name].append((dg_cntr, ch_cntr))
-
-                # update the parents as well
-                field_name = get_unique_name(field_names, name)
-                parents[ch_cntr] = field_name, 0
-
-                fields.append(samples)
-                types.append((field_name, samples.dtype))
-                field_names.add(field_name)
-
-                ch_cntr += 1
-
-                # simple channels don't have channel dependencies
-                gp_dep.append(None)
-
-            # channel group
-            kargs = {
-                'cycles_nr': cycles_nr,
-                'samples_byte_nr': offset >> 3,
-            }
-            gp['channel_group'] = ChannelGroup(**kargs)
-            gp['channel_group']['ch_nr'] = ch_cntr
-            gp['size'] = cycles_nr * (offset >> 3)
-
-            # data group
-            if self.version in ('3.20', '3.30'):
-                block_len = v2c.DG32_BLOCK_SIZE
-            else:
-                block_len = v2c.DG31_BLOCK_SIZE
-            gp['data_group'] = DataGroup(block_len=block_len)
-
-            # data block
-            if PYVERSION == 2:
-                types = fix_dtype_fields(types)
-            types = dtype(types)
-
-            gp['types'] = types
-            gp['parents'] = parents
-            gp['sorted'] = True
-
-            samples = fromarrays(fields, dtype=types)
-            try:
-                block = samples.tostring()
-
-                if memory == 'full':
-                    gp['data_location'] = v2c.LOCATION_MEMORY
-                    kargs = {'data': block}
-                    gp['data_block'] = DataBlock(**kargs)
-                else:
-                    gp['data_location'] = v2c.LOCATION_TEMPORARY_FILE
-                    if cycles_nr:
-                        data_address = tell()
-                        gp['data_group']['data_block_addr'] = data_address
-                        self._tempfile.write(block)
-                    else:
-                        gp['data_group']['data_block_addr'] = 0
-            except MemoryError:
-                if memory == 'full':
-                    raise
-                else:
-                    gp['data_location'] = v2c.LOCATION_TEMPORARY_FILE
-
-                    data_address = tell()
-                    gp['data_group']['data_block_addr'] = data_address
-                    for sample in samples:
-                        self._tempfile.write(sample.tostring())
-
-            # data group trigger
-            gp['trigger'] = [None, None]
-
-    def close(self):
-        """if the MDF was created with memory='minimum' and new
-        channels have been appended, this must be called just before the
-        object is discarded, to clean up the temporary file
-
-        """
-        if self._tempfile is not None:
-            self._tempfile.close()
-        if self._file is not None:
-            self._file.close()
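
Since `close` only matters once channels have been appended with memory='minimum', a short usage sketch may help; file names and signal values here are made up, and the constructor keywords are an assumption based on the signatures visible in this module:

```python
import numpy as np
from asammdf import MDF2, Signal

# memory='minimum' keeps appended samples in a temporary file, so the
# object has to be closed explicitly when it is no longer needed.
mdf = MDF2(version='3.30', memory='minimum')
counter = Signal(
    samples=np.arange(100, dtype=np.uint8),
    timestamps=np.arange(100, dtype=np.float64) / 100,
    name='Counter',
)
mdf.append([counter], 'created by a usage sketch')
mdf.save('sketch.mdf')  # hypothetical destination name
mdf.close()             # releases the temporary file backing the group
```
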
-    def get_channel_unit(self, name=None, group=None, index=None):
-        """Gets channel unit.
-
-        Channel can be specified in two ways:
-
-        * using the first positional argument *name*
-
-            * if there are multiple occurrences for this channel then the
-              *group* and *index* arguments can be used to select a specific
-              group.
-            * if there are multiple occurrences for this channel and either
-              the *group* or *index* arguments is None then a warning is
-              issued
-
-        * using the group number (keyword argument *group*) and the channel
-          number (keyword argument *index*). Use *info* method for group and
-          channel numbers
-
-        Parameters
-        ----------
-        name : string
-            name of channel
-        group : int
-            0-based group index
-        index : int
-            0-based channel index
-
-        Returns
-        -------
-        unit : str
-            found channel unit
-
-        """
-        gp_nr, ch_nr = self._validate_channel_selection(
-            name,
-            group,
-            index,
-        )
-
-        grp = self.groups[gp_nr]
-        if grp['data_location'] == v2c.LOCATION_ORIGINAL_FILE:
-            stream = self._file
-        else:
-            stream = self._tempfile
-
-        if self.memory == 'minimum':
-            addr = grp['channel_conversions'][ch_nr]
-            if addr:
-                conversion = ChannelConversion(
-                    address=addr,
-                    stream=stream,
-                )
-            else:
-                conversion = None
-
-        else:
-            conversion = grp['channel_conversions'][ch_nr]
-
-        if conversion:
-            unit = conversion['unit'].decode('latin-1').strip(' \n\t\0')
-        else:
-            unit = ''
-
-        return unit
-
-    def get_channel_comment(self, name=None, group=None, index=None):
-        """Gets channel comment.
-
-        Channel can be specified in two ways:
-
-        * using the first positional argument *name*
-
-            * if there are multiple occurrences for this channel then the
-              *group* and *index* arguments can be used to select a specific
-              group.
-            * if there are multiple occurrences for this channel and either
-              the *group* or *index* arguments is None then a warning is
-              issued
-
-        * using the group number (keyword argument *group*) and the channel
-          number (keyword argument *index*). Use *info* method for group and
-          channel numbers
-
-        Parameters
-        ----------
-        name : string
-            name of channel
-        group : int
-            0-based group index
-        index : int
-            0-based channel index
-
-        Returns
-        -------
-        comment : str
-            found channel comment
-
-        """
-        gp_nr, ch_nr = self._validate_channel_selection(
-            name,
-            group,
-            index,
-        )
-
-        grp = self.groups[gp_nr]
-        if grp['data_location'] == v2c.LOCATION_ORIGINAL_FILE:
-            stream = self._file
-        else:
-            stream = self._tempfile
-
-        if self.memory == 'minimum':
-            channel = Channel(
-                address=grp['channels'][ch_nr],
-                stream=stream,
-            )
-        else:
-            channel = grp['channels'][ch_nr]
-
-        comment = channel['description'].decode('latin-1')
-        comment = comment.strip(' \t\n\0')
-
-        return comment
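
The two accessors above and `get` below share the same channel-selection rules, so a quick sketch of the two equivalent call styles may help (channel name, group and index are made up):

```python
from asammdf import MDF2

mdf = MDF2('measurement.mdf')  # hypothetical existing file

# select by name; a warning is issued if the name occurs in several groups
unit = mdf.get_channel_unit('EngineSpeed')

# select explicitly by 0-based group and channel index
unit = mdf.get_channel_unit(group=2, index=5)

# the same disambiguation works for comments and samples
comment = mdf.get_channel_comment('EngineSpeed', group=2, index=5)
```
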
-    def get(self,
-            name=None,
-            group=None,
-            index=None,
-            raster=None,
-            samples_only=False,
-            data=None):
-        """Gets channel samples.
-
-        Channel can be specified in two ways:
-
-        * using the first positional argument *name*
-
-            * if there are multiple occurrences for this channel then the
-              *group* and *index* arguments can be used to select a specific
-              group.
-            * if there are multiple occurrences for this channel and either
-              the *group* or *index* arguments is None then a warning is
-              issued
-
-        * using the group number (keyword argument *group*) and the channel
-          number (keyword argument *index*). Use *info* method for group and
-          channel numbers
-
-
-        If the *raster* keyword argument is not *None* the output is
-        interpolated accordingly.
-
-        Parameters
-        ----------
-        name : string
-            name of channel
-        group : int
-            0-based group index
-        index : int
-            0-based channel index
-        raster : float
-            time raster in seconds
-        samples_only : bool
-            if *True* return only the channel samples as numpy array; if
-            *False* return a *Signal* object
-        data : bytes
-            prefetched data group records; default *None*
-
-        Returns
-        -------
-        res : (numpy.array | Signal)
-            returns *Signal* if *samples_only*=*False* (default option),
-            otherwise returns numpy.array.
-            The *Signal* samples are:
-
-                * numpy recarray for channels that have CDBLOCK or BYTEARRAY
-                  type channels
-                * numpy array for all the rest
-
-        Raises
-        ------
-        MdfException :
-
-        * if the channel name is not found
-        * if the group index is out of range
-        * if the channel index is out of range
-
-        """
-        gp_nr, ch_nr = self._validate_channel_selection(
-            name,
-            group,
-            index,
-        )
-
-        memory = self.memory
-        grp = self.groups[gp_nr]
-
-        if grp['data_location'] == v2c.LOCATION_ORIGINAL_FILE:
-            stream = self._file
-        else:
-            stream = self._tempfile
-
-        if memory != 'minimum':
-            channel = grp['channels'][ch_nr]
-            conversion = grp['channel_conversions'][ch_nr]
-            name = channel.name
-        else:
-            channel = Channel(
-                address=grp['channels'][ch_nr],
-                stream=stream,
-            )
-            addr = grp['channel_conversions'][ch_nr]
-            if addr:
-                conversion = ChannelConversion(
-                    address=addr,
-                    stream=stream,
-                )
-            else:
-                conversion = None
-            if name is None:
-                if channel.get('long_name_addr', 0):
-                    name = TextBlock(
-                        address=channel['long_name_addr'],
-                        stream=stream,
-                    )
-                    name = name['text']
-                else:
-                    name = channel['short_name']
-                name = name.decode('utf-8').strip(' \r\t\n\0')
-            name = name.split('\\')[0]
-            channel.name = name
-
-        dep = grp['channel_dependencies'][ch_nr]
-        cycles_nr = grp['channel_group']['cycles_nr']
-
-        # get data group record
-        if data is None:
-            data = self._load_group_data(grp)
-
-        info = None
-
-        # check if this is a channel array
-        if dep:
-            if dep['dependency_type'] == v2c.DEPENDENCY_TYPE_VECTOR:
-                shape = [dep['sd_nr'], ]
-            elif dep['dependency_type'] >= v2c.DEPENDENCY_TYPE_NDIM:
-                shape = []
-                i = 0
-                while True:
-                    try:
-                        dim = dep['dim_{}'.format(i)]
-                        shape.append(dim)
-                        i += 1
-                    except KeyError:
-                        break
-                shape = shape[::-1]
-
-            record_shape = tuple(shape)
-
-            arrays = [
-                self.get(group=dg_nr, index=ch_nr, samples_only=True)
-                for ch_nr, dg_nr in dep.referenced_channels
-            ]
-            if cycles_nr:
-                shape.insert(0, cycles_nr)
-
-            vals = column_stack(arrays).flatten().reshape(tuple(shape))
-
-            arrays = [vals, ]
-            types = [(channel.name, vals.dtype, record_shape), ]
-
-            if PYVERSION == 2:
-                types = fix_dtype_fields(types)
-
-            types = dtype(types)
-            vals = fromarrays(arrays, dtype=types)
-
-        else:
-            # get channel values
-            try:
-                parents, dtypes = grp['parents'], grp['types']
-            except KeyError:
-                grp['parents'], grp['types'] = self._prepare_record(grp)
-                parents, dtypes = grp['parents'], grp['types']
-
-            try:
-                parent, bit_offset = parents[ch_nr]
-            except KeyError:
-                parent, bit_offset = None, None
-
-            if parent is not None:
-                if 'record' not in grp:
-                    if dtypes.itemsize:
-                        record = fromstring(data, dtype=dtypes)
-                    else:
-                        record = None
-
-                    if memory == 'full':
-                        grp['record'] = record
-                else:
-                    record = grp['record']
-
-                vals = record[parent]
-                bits = channel['bit_count']
-                size = vals.dtype.itemsize
-                data_type = channel['data_type']
-
-                if vals.dtype.kind not in 'ui' and (bit_offset or not bits == size * 8):
-                    vals = self._get_not_byte_aligned_data(data, grp, ch_nr)
-                else:
-                    if bit_offset:
-                        dtype_ = vals.dtype
-                        if dtype_.kind == 'i':
-                            vals = vals.astype(dtype('<u{}'.format(size)))
-                            vals >>= bit_offset
-                        else:
-                            vals = vals >> bit_offset
-
-                    if not bits == size * 8:
-                        mask = (1 << bits) - 1
-                        if vals.flags.writeable:
-                            vals &= mask
-                        else:
-                            vals = vals & mask
-                        if data_type in v2c.SIGNED_INT:
-                            size = vals.dtype.itemsize
-                            mask = (1 << (size * 8)) - 1
-                            mask = (mask << bits) & mask
-                            vals |= mask
-                            vals = vals.astype('<i{}'.format(size))
-
-    def info(self):
-        """get MDF information as a dict
-
-        Examples
-        --------
-        >>> mdf = MDF2('test.mdf')
-        >>> mdf.info()
-
-        """
-        info = {}
-        for key in ('author',
-                    'organization',
-                    'project',
-                    'subject'):
-            value = self.header[key].decode('latin-1').strip(' \n\t\0')
-            info[key] = value
-        info['version'] = self.version
-        info['groups'] = len(self.groups)
-        for i, gp in enumerate(self.groups):
-            if gp['data_location'] == v2c.LOCATION_ORIGINAL_FILE:
-                stream = self._file
-            elif gp['data_location'] == v2c.LOCATION_TEMPORARY_FILE:
-                stream = self._tempfile
-            inf = {}
-            info['group {}'.format(i)] = inf
-            inf['cycles'] = gp['channel_group']['cycles_nr']
-            inf['channels count'] = len(gp['channels'])
-            for j, channel in enumerate(gp['channels']):
-                if self.memory != 'minimum':
-                    name = channel.name
-                else:
-                    channel = Channel(
-                        address=channel,
-                        stream=stream,
-                    )
-                    if channel.get('long_name_addr', 0):
-                        name = TextBlock(
-                            address=channel['long_name_addr'],
-                            stream=stream,
-                        )
-                        name = name['text']
-                    else:
-                        name = channel['short_name']
-                    name = name.decode('utf-8').strip(' \r\t\n\0')
-                    name = name.split('\\')[0]
-
-                if channel['channel_type'] == v2c.CHANNEL_TYPE_MASTER:
-                    ch_type = 'master'
-                else:
-                    ch_type = 'value'
-                inf['channel {}'.format(j)] = 'name="{}" type={}'.format(
-                    name,
-                    ch_type,
-                )
-
-        return info
-
-    def save(self, dst='', overwrite=None, compression=0):
-        """Save MDF to *dst*. If *dst* is not provided, the destination file
-        name is the MDF name. If overwrite is *True* then the destination
-        file is overwritten, otherwise the file name is appended with
-        '_<cntr>', where '<cntr>' is the first counter that produces a new
-        file name that does not already exist in the filesystem.
-
-        Parameters
-        ----------
-        dst : str
-            destination file name, Default ''
-        overwrite : bool
-            overwrite flag, default *False*
-        compression : int
-            does nothing for mdf version 3; introduced here to share the same
-            API as mdf version 4 files
-
-        """
-
-        if overwrite is None:
-            overwrite = self._overwrite
-
-        if self.name is None and dst == '':
-            message = ('Must specify a destination file name '
-                       'for MDF created from scratch')
-            raise MdfException(message)
-
-        dst = dst if dst else self.name
-        if overwrite is False:
-            if os.path.isfile(dst):
-                cntr = 0
-                while True:
-                    name = os.path.splitext(dst)[0] + '_{}.mdf'.format(cntr)
-                    if not os.path.isfile(name):
-                        break
-                    else:
-                        cntr += 1
-                message = ('Destination file "{}" already exists '
-                           'and "overwrite" is False. Saving MDF file as "{}"')
-                message = message.format(dst, name)
-                warnings.warn(message)
-                dst = name
-
-        if self.memory != 'minimum':
-            self._save_with_metadata(dst, overwrite, compression)
-        else:
-            self._save_without_metadata(dst, overwrite, compression)
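
The rename loop in `save` means repeated saves never clobber an existing file unless asked to; a sketch of the resulting names (hypothetical paths, behavior as documented in the docstring above):

```python
import numpy as np
from asammdf import MDF2, Signal

mdf = MDF2(version='3.30')
sig = Signal(
    samples=np.zeros(10),
    timestamps=np.arange(10, dtype=np.float64),
    name='Zero',
)
mdf.append([sig], 'save() naming sketch')

mdf.save('out.mdf', overwrite=True)   # writes out.mdf
mdf.save('out.mdf', overwrite=False)  # out.mdf exists -> warns, writes out_0.mdf
mdf.save('out.mdf', overwrite=False)  # next free counter -> out_1.mdf
```
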
-    def _save_with_metadata(self, dst, overwrite, compression):
-        """Save MDF to *dst*. If *dst* is not provided, the destination file
-        name is the MDF name. If overwrite is *True* then the destination
-        file is overwritten, otherwise the file name is appended with
-        '_<cntr>', where '<cntr>' is the first counter that produces a new
-        file name that does not already exist in the filesystem.
-
-        Parameters
-        ----------
-        dst : str
-            destination file name, Default ''
-        overwrite : bool
-            overwrite flag, default *False*
-        compression : int
-            does nothing for mdf version 3; introduced here to share the same
-            API as mdf version 4 files
-
-        """
-
-        if self.file_history is None:
-            self.file_history = TextBlock(text='''<FHcomment>
-<TX>created</TX>
-<tool_id>asammdf</tool_id>
-<tool_vendor> </tool_vendor>
-<tool_version>{}</tool_version>
-</FHcomment>'''.format(__version__))
-        else:
-            text = '{}\n{}: updated by asammdf {}'
-            old_history = self.file_history['text'].decode('latin-1')
-            timestamp = time.asctime().encode('latin-1')
-
-            text = text.format(
-                old_history,
-                timestamp,
-                __version__,
-            )
-            self.file_history = TextBlock(text=text)
-
-        if self.name is None and dst == '':
-            message = ('Must specify a destination file name '
-                       'for MDF created from scratch')
-            raise MdfException(message)
-
-        dst = dst if dst else self.name
-        if overwrite is False:
-            if os.path.isfile(dst):
-                cntr = 0
-                while True:
-                    name = os.path.splitext(dst)[0] + '_{}.mdf'.format(cntr)
-                    if not os.path.isfile(name):
-                        break
-                    else:
-                        cntr += 1
-                message = ('Destination file "{}" already exists '
-                           'and "overwrite" is False. Saving MDF file as "{}"')
-                message = message.format(dst, name)
-                warnings.warn(message)
-                dst = name
-
-        # all MDF blocks are appended to the blocks list in the order in which
-        # they will be written to disk. While creating this list, all the
-        # relevant block links are updated so that once all blocks have been
-        # added to the list they can be written using the bytes protocol.
-        # DataGroup blocks are written first after the identification and
-        # header blocks. When memory='low' we need to restore the
-        # original data block addresses within the data group block. This is
-        # needed to allow further work with the object after the save method
-        # call (e.g. new calls to the get method). Since the data group blocks
-        # are written first, it is safe to restore the original links when the
-        # data blocks are written. For memory='low' the blocks list will
-        # contain a tuple instead of a DataBlock instance; the tuple will have
-        # the reference to the data group object and the original link to the
-        # data block in the source MDF file.
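
The comment block above describes a two-pass serialization: the first pass walks every block, assigns its absolute address and patches the links that point to it; the second pass simply dumps the list in order. A generic, self-contained sketch of that pattern (plain Python with made-up block names and sizes, not asammdf code):

```python
# First pass: lay the blocks out and remember each block's address.
blocks = []      # (name, payload) in the order they will hit the disk
addresses = {}   # name -> assigned absolute address

def add_block(name, payload, address):
    addresses[name] = address
    blocks.append((name, payload))
    return address + len(payload)   # next free address

address = 0
address = add_block('ID', b'I' * 64, address)
address = add_block('HD', b'H' * 164, address)
address = add_block('DG', b'D' * 28, address)

# Second pass: every link is known, so writing is purely sequential.
with open('layout.bin', 'wb') as stream:
    for name, payload in blocks:
        assert stream.tell() == addresses[name]  # layout matches plan
        stream.write(payload)
```
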
-
-        if self.memory == 'low' and dst == self.name:
-            destination = dst + '.temp'
-        else:
-            destination = dst
-
-        with open(destination, 'wb+') as dst_:
-            defined_texts = {}
-
-            write = dst_.write
-            # list of all blocks
-            blocks = []
-
-            address = 0
-
-            blocks.append(self.identification)
-            address += v2c.ID_BLOCK_SIZE
-
-            blocks.append(self.header)
-            address += self.header['block_len']
-
-            self.file_history.address = address
-            blocks.append(self.file_history)
-            address += self.file_history['block_len']
-
-            # DataGroup
-            # put them first in the block list so they will be written first
-            # to disk; this way, in case of memory='low', we can safely
-            # restore the original data block address
-            for gp in self.groups:
-                dg = gp['data_group']
-                blocks.append(dg)
-                dg.address = address
-                address += dg['block_len']
-
-            if self.groups:
-                for i, dg in enumerate(self.groups[:-1]):
-                    addr = self.groups[i+1]['data_group'].address
-                    dg['data_group']['next_dg_addr'] = addr
-                self.groups[-1]['data_group']['next_dg_addr'] = 0
-
-            for gp in self.groups:
-                gp_texts = gp['texts']
-
-                # Texts
-                for item_list in gp_texts.values():
-                    for my_dict in item_list:
-                        if my_dict is None:
-                            continue
-                        for key, tx_block in my_dict.items():
-                            # text blocks can be shared
-                            text = tx_block['text']
-                            if text in defined_texts:
-                                tx_block.address = defined_texts[text]
-                            else:
-                                defined_texts[text] = address
-                                tx_block.address = address
-                                blocks.append(tx_block)
-                                address += tx_block['block_len']
-
-                # ChannelConversions
-                cc = gp['channel_conversions']
-                for i, conv in enumerate(cc):
-                    if conv is None:
-                        continue
-
-                    conv.address = address
-                    if conv['conversion_type'] == v2c.CONVERSION_TYPE_VTABR:
-                        pairs = gp_texts['conversion_tab'][i].items()
-                        for key, item in pairs:
-                            conv[key] = item.address
-
-                    blocks.append(conv)
-                    address += conv['block_len']
-
-                # Channel Extension
-                cs = gp['channel_extensions']
-                for source in cs:
-                    if source:
-                        source.address = address
-                        blocks.append(source)
-                        address += source['block_len']
-
-                # Channel Dependency
-                cd = gp['channel_dependencies']
-                for dep in cd:
-                    if dep:
-                        dep.address = address
-                        blocks.append(dep)
-                        address += dep['block_len']
-
-                # Channels
-                ch_texts = gp_texts['channels']
-                for i, channel in enumerate(gp['channels']):
-                    channel.address = address
-                    channel_texts = ch_texts[i]
-
-                    blocks.append(channel)
-                    if self.version == '2.00':
-                        address += v2c.CN20_BLOCK_SIZE
-                    else:
-                        address += v2c.CN21_BLOCK_SIZE
-
-                    if channel_texts:
-                        for key in ('long_name_addr',
-                                    'comment_addr'):
-                            if key in channel_texts:
-                                channel[key] = channel_texts[key].address
-                            else:
-                                channel[key] = 0
-
-                    channel['conversion_addr'] = cc[i].address if cc[i] else 0
-                    if cs[i]:
-                        channel['source_depend_addr'] = cs[i].address
-                    else:
-                        channel['source_depend_addr'] = 0
-                    if cd[i]:
-                        channel['ch_depend_addr'] = cd[i].address
-                    else:
-                        channel['ch_depend_addr'] = 0
-
-                for channel, next_channel in pair(gp['channels']):
-                    channel['next_ch_addr'] = next_channel.address
-                next_channel['next_ch_addr'] = 0
-
-                # ChannelGroup
-                cg = gp['channel_group']
-                cg.address = address
-                blocks.append(cg)
-                address += cg['block_len']
-
-                cg['first_ch_addr'] = gp['channels'][0].address
-                cg['next_cg_addr'] = 0
-                cg_texts = gp['texts']['channel_group'][0]
-                if 'comment_addr' in cg_texts:
-                    addr = cg_texts['comment_addr'].address
-                    cg['comment_addr'] = addr
-
-                # TriggerBlock
-                trigger, trigger_text = gp['trigger']
-                if trigger:
-                    if trigger_text:
-                        trigger_text.address = address
-                        blocks.append(trigger_text)
-                        address += trigger_text['block_len']
-                        trigger['comment_addr'] = trigger_text.address
-                    else:
-                        trigger['comment_addr'] = 0
-
-                    trigger.address = address
-                    blocks.append(trigger)
-                    address += trigger['block_len']
-
-                # DataBlock
-                original_data_addr = gp['data_group']['data_block_addr']
-                if gp['size']:
-                    gp['data_group']['data_block_addr'] = address
-                else:
-                    gp['data_group']['data_block_addr'] = 0
-                address += gp['size']
-                if self.memory == 'full':
-                    blocks.append(gp['data_block'])
-                else:
-                    # trying to call bytes([gp, address]) will result in an
-                    # exception that can be used as a flag for a non-existing
-                    # data block; in case of memory='low', the address is
-                    # the actual address of the data group's data within the
-                    # original file
-                    blocks.append([gp, original_data_addr])
-
-            # update referenced channel addresses in the channel dependencies
-            for gp in self.groups:
-                for dep in gp['channel_dependencies']:
-                    if not dep:
-                        continue
-
-                    for i, pair_ in enumerate(dep.referenced_channels):
-                        ch_nr, dg_nr = pair_
-                        grp = self.groups[dg_nr]
-                        ch = grp['channels'][ch_nr]
-                        dep['ch_{}'.format(i)] = ch.address
-                        dep['cg_{}'.format(i)] = grp['channel_group'].address
-                        dep['dg_{}'.format(i)] = grp['data_group'].address
-
-            # DataGroup
-            for gp in self.groups:
-                gp['data_group']['first_cg_addr'] = gp['channel_group'].address
-                if gp['trigger'][0]:
-                    gp['data_group']['trigger_addr'] = gp['trigger'][0].address
-                else:
-                    gp['data_group']['trigger_addr'] = 0
-
-            if self.groups:
-                address = self.groups[0]['data_group'].address
-                self.header['first_dg_addr'] = address
-                self.header['dg_nr'] = len(self.groups)
-                self.header['comment_addr'] = self.file_history.address
-                self.header['program_addr'] = 0
-
-            for block in blocks:
-                try:
-                    write(bytes(block))
-                except:
-                    # this will only be executed for data blocks when
-                    # memory='low'
-                    gp, address = block
-                    # restore data block address from original file so that
-                    # future calls to get will still work after the save
-                    gp['data_group']['data_block_addr'] = address
-                    data = self._load_group_data(gp)
-                    write(data)
-
-        if self.memory == 'low' and dst == self.name:
-            self.close()
-            os.remove(self.name)
-            os.rename(destination, self.name)
-
-            self.groups = []
-            self.header = None
-            self.identification = None
-            self.file_history = []
-            self.channels_db = {}
-            self.masters_db = {}
-            self.attachments = []
-            self.file_comment = None
-
-            self._ch_map = {}
-            self._master_channel_cache = {}
-
-            self._tempfile = TemporaryFile()
-            self._file = open(self.name, 'rb')
-            self._read()
-    def _save_without_metadata(self, dst, overwrite, compression):
-        """Save MDF to *dst*. If *dst* is not provided, the destination file
-        name is the MDF name. If overwrite is *True* then the destination
-        file is overwritten, otherwise the file name is appended with
-        '_<cntr>', where '<cntr>' is the first counter that produces a new
-        file name that does not already exist in the filesystem.
-
-        Parameters
-        ----------
-        dst : str
-            destination file name, Default ''
-        overwrite : bool
-            overwrite flag, default *False*
-        compression : int
-            does nothing for mdf version 3; introduced here to share the same
-            API as mdf version 4 files
-
-        """
-
-        if self.file_history is None:
-            self.file_history = TextBlock(text='''<FHcomment>
-<TX>created</TX>
-<tool_id>asammdf</tool_id>
-<tool_vendor> </tool_vendor>
-<tool_version>{}</tool_version>
-</FHcomment>'''.format(__version__))
-        else:
-            text = '{}\n{}: updated by asammdf {}'
-            old_history = self.file_history['text'].decode('latin-1')
-            timestamp = time.asctime().encode('latin-1')
-
-            text = text.format(
-                old_history,
-                timestamp,
-                __version__,
-            )
-            self.file_history = TextBlock(text=text)
-
-        # all MDF blocks are appended to the blocks list in the order in which
-        # they will be written to disk. While creating this list, all the
-        # relevant block links are updated so that once all blocks have been
-        # added to the list they can be written using the bytes protocol.
-        # DataGroup blocks are written first after the identification and
-        # header blocks. When memory='minimum' we need to restore the
-        # original data block addresses within the data group block. This is
-        # needed to allow further work with the object after the save method
-        # call (e.g. new calls to the get method). Since the data group blocks
-        # are written first, it is safe to restore the original links when the
-        # data blocks are written.
-
-        if dst == self.name:
-            destination = dst + '.temp'
-        else:
-            destination = dst
-
-        with open(destination, 'wb+') as dst_:
-            defined_texts = {}
-
-            write = dst_.write
-            tell = dst_.tell
-            seek = dst_.seek
-            # list of all blocks
-            blocks = []
-
-            address = 0
-
-            write(bytes(self.identification))
-
-            write(bytes(self.header))
-
-            address = tell()
-            self.file_history.address = address
-            write(bytes(self.file_history))
-
-            # DataGroup
-            # put them first in the block list so they will be written first
-            # to disk; this way, in case of memory='minimum', we can safely
-            # restore the original data block address
-
-            data_address = []
-
-            for gp in self.groups:
-                gp_texts = deepcopy(gp['texts'])
-                if gp['data_location'] == v2c.LOCATION_ORIGINAL_FILE:
-                    stream = self._file
-                else:
-                    stream = self._tempfile
-
-                # Texts
-                for item_list in gp_texts.values():
-                    for my_dict in item_list:
-                        if my_dict is None:
-                            continue
-                        for key, tx_block in my_dict.items():
-
-                            # text blocks can be shared
-                            block = TextBlock(
-                                address=tx_block,
-                                stream=stream,
-                            )
-                            text = block['text']
-                            if text in defined_texts:
-                                my_dict[key] = defined_texts[text]
-                            else:
-                                address = tell()
-                                defined_texts[text] = address
-                                my_dict[key] = address
-                                write(bytes(block))
-
-                # ChannelConversions
-                cc = gp['temp_channel_conversions'] = []
-                for i, conv in enumerate(gp['channel_conversions']):
-                    if not conv:
-                        gp['temp_channel_conversions'].append(0)
-                        continue
-
-                    address = tell()
-                    gp['temp_channel_conversions'].append(address)
-                    conv = ChannelConversion(
-                        address=conv,
-                        stream=stream,
-                    )
-                    if conv['conversion_type'] == v2c.CONVERSION_TYPE_VTABR:
-                        pairs = gp_texts['conversion_tab'][i].items()
-                        for key, item in pairs:
-                            conv[key] = item
-
-                    write(bytes(conv))
-
-                # Channel Extension
-                cs = gp['temp_channel_extensions'] = []
-                for source in gp['channel_extensions']:
-                    if source:
-                        address = tell()
-                        gp['temp_channel_extensions'].append(address)
-                        source = ChannelExtension(
-                            address=source,
-                            stream=stream,
-                        )
-                        write(bytes(source))
-                    else:
-                        gp['temp_channel_extensions'].append(0)
-
-                # Channel Dependency
-                cd = gp['temp_channel_dependencies'] = []
-                for dep in gp['channel_dependencies']:
-                    if dep:
-                        address = tell()
-                        gp['temp_channel_dependencies'].append(address)
-                        dep.address = address
-                        write(bytes(dep))
-                    else:
-                        gp['temp_channel_dependencies'].append(0)
-
-                # Channels
-                blocks = []
-                address = tell()
-                ch_texts = gp_texts['channels']
-                gp['temp_channels'] = ch_addrs = []
-                gp['channel_group']['first_ch_addr'] = address
-                for i, channel in enumerate(gp['channels']):
-                    channel = Channel(
-                        address=channel,
-                        stream=stream,
-                    )
-                    channel.address = address
-                    channel_texts = ch_texts[i]
-
-                    ch_addrs.append(address)
-
-                    address += channel['block_len']
-                    blocks.append(channel)
-
-                    if channel_texts:
-                        for key in ('long_name_addr',
-                                    'comment_addr'):
-                            if key in channel_texts:
-                                channel[key] = channel_texts[key]
-                            else:
-                                channel[key] = 0
-                    else:
-                        for key in ('long_name_addr',
-                                    'comment_addr'):
-                            channel[key] = 0
-                    if self.version == '2.00' and 'long_name_addr' in channel:
-                        del channel['long_name_addr']
-
-                    channel['conversion_addr'] = cc[i]
-                    channel['source_depend_addr'] = cs[i]
-                    channel['ch_depend_addr'] = cd[i]
-
-                group_channels = gp['channels']
-                if group_channels:
-                    for j, channel in enumerate(blocks[:-1]):
-                        channel['next_ch_addr'] = blocks[j+1].address
-                    blocks[-1]['next_ch_addr'] = 0
-                for block in blocks:
-                    write(bytes(block))
-
-                blocks = None
-
-                address = tell()
-
-                # ChannelGroup
-                cg = gp['channel_group']
-                cg.address = address
-
-                cg['next_cg_addr'] = 0
-                cg_texts = gp_texts['channel_group'][0]
-                if 'comment_addr' in cg_texts:
-                    addr = cg_texts['comment_addr']
-                    cg['comment_addr'] = addr
-                write(bytes(cg))
-
-                address = tell()
-
-                # TriggerBlock
-                trigger, trigger_text = gp['trigger']
-                if trigger:
-                    if trigger_text:
-                        trigger_text.address = address
-                        write(bytes(trigger_text))
-                        trigger['comment_addr'] = trigger_text.address
-                    else:
-                        trigger['comment_addr'] = 0
-
-                    address = tell()
-                    trigger.address = address
-                    write(bytes(trigger))
-
-                address = tell()
-
-                # DataBlock
-                data = self._load_group_data(gp)
-
-                if data:
-                    data_address.append(address)
-                    write(bytes(data))
-                else:
-                    data_address.append(0)
-
-                del gp['temp_channel_conversions']
-                del gp['temp_channel_extensions']
-
-            orig_addr = [
-                gp['data_group']['data_block_addr'] for gp in self.groups
-            ]
-            address = tell()
-            for i, gp in enumerate(self.groups):
-                dg = gp['data_group']
-                dg['data_block_addr'] = data_address[i]
-                dg.address = address
-                address += dg['block_len']
-                gp['data_group']['first_cg_addr'] = gp['channel_group'].address
-                if gp['trigger'][0]:
-                    gp['data_group']['trigger_addr'] = gp['trigger'][0].address
-                else:
-                    gp['data_group']['trigger_addr'] = 0
-
-            if self.groups:
-                for i, gp in enumerate(self.groups[:-1]):
-                    addr = self.groups[i+1]['data_group'].address
-                    gp['data_group']['next_dg_addr'] = addr
-                self.groups[-1]['data_group']['next_dg_addr'] = 0
-
-            for i, gp in enumerate(self.groups):
-                write(bytes(gp['data_group']))
-                gp['data_block_addr'] = orig_addr[i]
-
-            if self.groups:
-                address = self.groups[0]['data_group'].address
-                self.header['first_dg_addr'] = address
-                self.header['dg_nr'] = len(self.groups)
-                self.header['comment_addr'] = self.file_history.address
-                self.header['program_addr'] = 0
-
-            # update referenced channel addresses in the channel dependencies
-            for gp in self.groups:
-                for dep in gp['channel_dependencies']:
-                    if not dep:
-                        continue
-
-                    for i, 
pair_ in enumerate(dep.referenced_channels): - _, dg_nr = pair_ - grp = self.groups[dg_nr] - dep['ch_{}'.format(i)] = grp['temp_channels'][i] - dep['cg_{}'.format(i)] = grp['channel_group'].address - dep['dg_{}'.format(i)] = grp['data_group'].address - seek(dep.address, v2c.SEEK_START) - write(bytes(dep)) - - seek(v2c.ID_BLOCK_SIZE, v2c.SEEK_START) - write(bytes(self.header)) - - for gp in self.groups: - del gp['temp_channels'] - - if dst == self.name: - self.close() - os.remove(self.name) - os.rename(destination, self.name) - - self.groups = [] - self.header = None - self.identification = None - self.file_history = [] - self.channels_db = {} - self.masters_db = {} - self.attachments = [] - self.file_comment = None - - self._ch_map = {} - self._master_channel_cache = {} - - self._tempfile = TemporaryFile() - self._file = open(self.name, 'rb') - self._read() - - -if __name__ == '__main__': - pass diff --git a/asammdf/mdf_v2.py b/asammdf/mdf_v2.py new file mode 100644 index 000000000..2f24664cb --- /dev/null +++ b/asammdf/mdf_v2.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +""" ASAM MDF version 2 file format module """ + +from __future__ import division, print_function + +from .mdf_v3 import MDF3 + + +__all__ = ['MDF2', ] + + +# MDF versions 2 and 3 share the same implementation +class MDF2(MDF3): + pass + + +if __name__ == '__main__': + pass diff --git a/asammdf/mdf3.py b/asammdf/mdf_v3.py similarity index 70% rename from asammdf/mdf3.py rename to asammdf/mdf_v3.py index 6b367bbe7..0f26c7707 100644 --- a/asammdf/mdf3.py +++ b/asammdf/mdf_v3.py @@ -1,58 +1,58 @@ # -*- coding: utf-8 -*- """ ASAM MDF version 3 file format module """ -from __future__ import print_function, division -import sys - +from __future__ import division, print_function import os +import sys import time import warnings - from collections import defaultdict -from functools import reduce, partial -from tempfile import TemporaryFile -from itertools import product from copy import deepcopy +from functools import reduce +from itertools import product +from tempfile import TemporaryFile +from struct import unpack +from numexpr import evaluate from numpy import ( - interp, - linspace, - dtype, + arange, + array, array_equal, + clip, column_stack, - array, - searchsorted, - log, + dtype, exp, - clip, - union1d, - float64, flip, - unpackbits, + float64, + interp, + linspace, + log, + ones, packbits, roll, - zeros, + searchsorted, uint8, - arange, + union1d, + unpackbits, + zeros, ) -from numpy.core.records import fromstring, fromarrays from numpy.core.defchararray import encode -from numexpr import evaluate +from numpy.core.records import fromarrays, fromstring +from . import v2_v3_constants as v23c +from .signal import Signal from .utils import ( MdfException, - get_fmt, - pair, - fmt_to_datatype, - get_unique_name, - get_min_max, + as_non_byte_sized_signed_int, fix_dtype_fields, + fmt_to_datatype_v3, + get_fmt_v3, + get_min_max, + get_unique_name, + get_text_v3, ) -from .signal import Signal -from .version import __version__ -from . 
import v3constants as v3c -from .v3blocks import ( +from .v2_v3_blocks import ( Channel, ChannelConversion, ChannelDependency, @@ -65,15 +65,13 @@ TextBlock, TriggerBlock, ) - - -get_fmt = partial(get_fmt, version=3) -fmt_to_datatype = partial(fmt_to_datatype, version=3) +from .version import __version__ PYVERSION = sys.version_info[0] if PYVERSION == 2: + # pylint: disable=W0622 from .utils import bytes - + # pylint: enable=W0622 __all__ = ['MDF3', ] @@ -95,7 +93,8 @@ class MDF3(object): * if *minimum* only minimal data is memorised into RAM version : string - mdf file version ('3.00', '3.10', '3.20' or '3.30'); default '3.30' + mdf file version ('2.00', '2.10', '2.14', '3.00', '3.10', '3.20' or + '3.30'); default '3.30' Attributes ---------- @@ -107,8 +106,8 @@ class MDF3(object): mdf file header file_history : TextBlock file history text block; can be None - memory : bool - load measured data option + memory : str + memory optimization option version : str mdf version channels_db : dict @@ -123,7 +122,7 @@ class MDF3(object): _compact_integers_on_append = False _overwrite = False - def __init__(self, name=None, memory=2, version='3.30'): + def __init__(self, name=None, memory='full', version='3.30'): self.groups = [] self.header = None self.identification = None @@ -140,6 +139,9 @@ def __init__(self, name=None, memory=2, version='3.30'): self._tempfile.write(b'\0') self._file = None + self.attachments = None + self.file_comment = None + if name: self._file = open(self.name, 'rb') self._read() @@ -158,7 +160,7 @@ def _load_group_data(self, group): # for now appended groups keep the measured data in the memory. # the plan is to use a temp file for appended groups, to keep the # memory usage low. - if group['data_location'] == v3c.LOCATION_ORIGINAL_FILE: + if group['data_location'] == v23c.LOCATION_ORIGINAL_FILE: # this is a group from the source file # so fetch the measured data from it stream = self._file @@ -176,6 +178,8 @@ def _load_group_data(self, group): else: read_size = group['size'] record_id = group['channel_group']['record_id'] + if PYVERSION == 2: + record_id = chr(record_id) cg_size = group['record_size'] if group['data_group']['record_id_nr'] <= 2: record_id_nr = group['data_group']['record_id_nr'] @@ -193,23 +197,23 @@ def _load_group_data(self, group): size = len(data) while i < size: rec_id = data[i] - # skip redord id + # skip record id i += 1 rec_size = cg_size[rec_id] if rec_id == record_id: - rec_data = data[i: i+rec_size] + rec_data = data[i: i + rec_size] cg_data.append(rec_data) - # concider the second record ID if it exists + # consider the second record ID if it exists if record_id_nr == 2: i += rec_size + 1 else: i += rec_size data = b''.join(cg_data) - elif group['data_location'] == v3c.LOCATION_TEMPORARY_FILE: + elif group['data_location'] == v23c.LOCATION_TEMPORARY_FILE: read_size = group['size'] dat_addr = group['data_group']['data_block_addr'] if dat_addr: - self._tempfile.seek(dat_addr, v3c.SEEK_START) + self._tempfile.seek(dat_addr, v23c.SEEK_START) data = self._tempfile.read(read_size) else: data = b'' @@ -275,18 +279,15 @@ def _prepare_record(self, group): data_type = new_ch['data_type'] bit_count = new_ch['bit_count'] if memory == 'minimum': - channel_texts = grp['texts']['channels'][original_index] - if channel_texts and 'long_name_addr' in channel_texts: - address = grp['texts']['channels'][original_index]['long_name_addr'] - - block = TextBlock( - address=address, - stream=stream, - ) - name = block['text'].decode('latin-1').strip(' \r\n\t\0') + if 
new_ch.get('long_name_addr', 0):
+                    name = get_text_v3(new_ch['long_name_addr'], stream)
                 else:
-                    name = new_ch['short_name'].decode('latin-1').strip(' \r\n\t\0')
-                name = name.split('\\')[0]
+                    name = (
+                        new_ch['short_name']
+                        .decode('latin-1')
+                        .strip(' \r\n\t\0')
+                        .split('\\')[0]
+                    )
             else:
                 name = new_ch.name
@@ -304,15 +305,15 @@ def _prepare_record(self, group):

             # adjust size to 1, 2, 4 or 8 bytes for nonstandard integers
             size = bit_offset + bit_count
-            if data_type == v3c.DATA_TYPE_STRING:
+            if data_type == v23c.DATA_TYPE_STRING:
                 next_byte_aligned_position = parent_start_offset + size
                 size = size // 8
                 if next_byte_aligned_position <= record_size:
-                    dtype_pair = (name, get_fmt(data_type, size))
+                    dtype_pair = (name, get_fmt_v3(data_type, size))
                     types.append(dtype_pair)
                     parents[original_index] = name, bit_offset

-            elif data_type == v3c.DATA_TYPE_BYTEARRAY:
+            elif data_type == v23c.DATA_TYPE_BYTEARRAY:
                 size = size // 8
                 next_byte_aligned_position = parent_start_offset + size
                 if next_byte_aligned_position <= record_size:
@@ -335,7 +336,7 @@ def _prepare_record(self, group):
                         size = 1

                 if next_byte_aligned_position <= record_size:
-                    dtype_pair = (name, get_fmt(data_type, size))
+                    dtype_pair = (name, get_fmt_v3(data_type, size))
                     types.append(dtype_pair)
                     parents[original_index] = name, bit_offset
@@ -363,10 +364,10 @@ def _prepare_record(self, group):

     def _get_not_byte_aligned_data(self, data, group, ch_nr):
         big_endian_types = (
-            v3c.DATA_TYPE_UNSIGNED_MOTOROLA,
-            v3c.DATA_TYPE_FLOAT_MOTOROLA,
-            v3c.DATA_TYPE_DOUBLE_MOTOROLA,
-            v3c.DATA_TYPE_SIGNED_MOTOROLA,
+            v23c.DATA_TYPE_UNSIGNED_MOTOROLA,
+            v23c.DATA_TYPE_FLOAT_MOTOROLA,
+            v23c.DATA_TYPE_DOUBLE_MOTOROLA,
+            v23c.DATA_TYPE_SIGNED_MOTOROLA,
         )

         record_size = group['channel_group']['samples_byte_nr']
@@ -446,7 +447,7 @@ def _get_not_byte_aligned_data(self, data, group, ch_nr):
             vals = fromarrays([vals, extra], dtype=dtype(types))
         vals = vals.tostring()

-        fmt = get_fmt(channel['data_type'], size)
+        fmt = get_fmt_v3(channel['data_type'], size)
         if size <= byte_count:
             types = [
                 ('vals', fmt),
@@ -496,8 +497,10 @@ def _validate_channel_selection(self, name=None, group=None, index=None):
         """
         if name is None:
             if group is None or index is None:
-                message = ('Invalid arguments for channel selection: '
-                           'must give "name" or, "group" and "index"')
+                message = (
+                    'Invalid arguments for channel selection: '
+                    'must give "name" or, "group" and "index"'
+                )
                 raise MdfException(message)
             else:
                 gp_nr, ch_nr = group, index
@@ -513,45 +516,32 @@ def _validate_channel_selection(self, name=None, group=None, index=None):
             if group is None:
                 gp_nr, ch_nr = self.channels_db[name][0]
                 if len(self.channels_db[name]) > 1:
-                    message = ('Multiple occurances for channel "{}". '
-                               'Using first occurance from data group {}. '
-                               'Provide both "group" and "index" arguments'
-                               ' to select another data group')
+                    message = (
+                        'Multiple occurrences for channel "{}". '
+                        'Using first occurrence from data group {}. '
+                        'Provide both "group" and "index" arguments'
+                        ' to select another data group'
+                    )
                     message = message.format(name, gp_nr)
                     warnings.warn(message)
             else:
-                group_valid = False
                 for gp_nr, ch_nr in self.channels_db[name]:
                     if gp_nr == group:
-                        group_valid = True
                         if index is None:
                             break
                         elif index == ch_nr:
                             break
                 else:
-                    if group_valid:
-                        gp_nr, ch_nr = self.channels_db[name][group]
-                        message = ('You have selected channel index "{}"'
-                                   'of group "{}" for channel "{}", but '
-                                   'this channel index is invalid. 
Using ' - 'first occurance of "{}" in this group' - ' at index "{}"') - message = message.format( - index, - group, - name, - name, - ch_nr, - ) + if index is None: + message = 'Channel "{}" not found in group {}' + message = message.format(name, group) else: - gp_nr, ch_nr = self.channels_db[name][0] - message = ('You have selected group "{}" for ' - 'channel "{}", but this channel was not' - ' found in this group, or this group ' - 'index does not exist. Using first ' - 'occurance of "{}" from group "{}"') - message = message.format(group, name, name, gp_nr) - warnings.warn(message) + message = ( + 'Channel "{}" not found in group {} ' + 'at index {}' + ) + message = message.format(name, group, index) + raise MdfException(message) return gp_nr, ch_nr def _read(self): @@ -563,16 +553,18 @@ def _read(self): seek = stream.seek dg_cntr = 0 - seek(0, v3c.SEEK_START) + seek(0, v23c.SEEK_START) self.identification = FileIdentificationBlock( stream=stream, ) self.header = HeaderBlock(stream=stream) - self.version = self.identification['version_str']\ - .decode('latin-1')\ + self.version = ( + self.identification['version_str'] + .decode('latin-1') .strip(' \n\t\0') + ) self.file_history = TextBlock( address=self.header['comment_addr'], @@ -580,25 +572,32 @@ def _read(self): ) # this will hold mapping from channel address to Channel object - # needed for linking dependecy blocks to refernced channels after + # needed for linking dependency blocks to referenced channels after # the file is loaded ch_map = {} + ce_map = {} + cc_map = {} # go to first date group dg_addr = self.header['first_dg_addr'] # read each data group sequentially while dg_addr: - gp = DataGroup(address=dg_addr, stream=stream) - record_id_nr = gp['record_id_nr'] - cg_nr = gp['cg_nr'] - cg_addr = gp['first_cg_addr'] - data_addr = gp['data_block_addr'] + data_group = DataGroup( + address=dg_addr, + stream=stream, + ) + record_id_nr = data_group['record_id_nr'] + cg_nr = data_group['cg_nr'] + cg_addr = data_group['first_cg_addr'] + data_addr = data_group['data_block_addr'] # read trigger information if available - trigger_addr = gp['trigger_addr'] + trigger_addr = data_group['trigger_addr'] if trigger_addr: - trigger = TriggerBlock(address=trigger_addr, - stream=stream) + trigger = TriggerBlock( + address=trigger_addr, + stream=stream, + ) if trigger['text_addr']: trigger_text = TextBlock( address=trigger['text_addr'], @@ -620,7 +619,6 @@ def _read(self): grp['channel_extensions'] = [] grp['data_block'] = None grp['texts'] = { - 'channels': [], 'conversion_tab': [], 'channel_group': [], } @@ -634,10 +632,11 @@ def _read(self): kargs = {'first_cg_addr': cg_addr, 'data_block_addr': data_addr} - if self.version in ('3.20', '3.30'): - kargs['block_len'] = v3c.DG32_BLOCK_SIZE + if self.version >= '3.20': + kargs['block_len'] = v23c.DG_POST_320_BLOCK_SIZE else: - kargs['block_len'] = v3c.DG31_BLOCK_SIZE + kargs['block_len'] = v23c.DG_PRE_320_BLOCK_SIZE + kargs['record_id_nr'] = record_id_nr grp['data_group'] = DataGroup(**kargs) @@ -667,7 +666,6 @@ def _read(self): ch_cntr = 0 grp_chs = grp['channels'] grp_conv = grp['channel_conversions'] - grp_ch_texts = grp['texts']['channels'] while ch_addr: # read channel block and create channel object @@ -692,14 +690,25 @@ def _read(self): # read conversion block address = new_ch['conversion_addr'] if address: - new_conv = ChannelConversion( - address=address, - stream=stream, - ) - if memory != 'minimum': - grp_conv.append(new_conv) - else: + stream.seek(address + 2, v23c.SEEK_START) + size = 
unpack('': + timestamps = timestamps.byteswap().newbyteorder() + for signal in signals: + if signal.samples.dtype.byteorder == '>': + signal.samples = signal.samples.byteswap().newbyteorder() + + if self.version >= '3.00': + channel_size = v23c.CN_DISPLAYNAME_BLOCK_SIZE + elif self.version >= '2.10': + channel_size = v23c.CN_LONGNAME_BLOCK_SIZE else: - t = t_ + channel_size = v23c.CN_SHORT_BLOCK_SIZE memory = self.memory file = self._tempfile write = file.write tell = file.tell + kargs = { + 'module_nr': 0, + 'module_address': 0, + 'type': v23c.SOURCE_ECU, + 'description': b'Channel inserted by Python Script', + } + ce_block = ChannelExtension(**kargs) + if memory == 'minimum': + ce_address = tell() + write(bytes(ce_block)) + + if acquisition_info: + acq_block = TextBlock(text=acquisition_info) + if memory == 'minimum': + acq_address = tell() + write(bytes(acq_block)) + else: + acq_block = None + acq_address = 0 + # split regular from composed signals. Composed signals have recarray # samples or multimendional ndarray. # The regular signals will be first added to the group. @@ -1006,26 +1040,26 @@ def append(self, # be saved as new signals. simple_signals = [ sig for sig in signals - if len(sig.samples.shape) <= 1 and - sig.samples.dtype.names is None + if len(sig.samples.shape) <= 1 + and sig.samples.dtype.names is None ] composed_signals = [ sig for sig in signals - if len(sig.samples.shape) > 1 or - sig.samples.dtype.names + if len(sig.samples.shape) > 1 + or sig.samples.dtype.names ] # mdf version 4 structure channels and CANopen types will be saved to # new channel groups new_groups_signals = [ sig for sig in composed_signals - if sig.samples.dtype.names and - sig.samples.dtype.names[0] != sig.name + if sig.samples.dtype.names + and sig.samples.dtype.names[0] != sig.name ] composed_signals = [ sig for sig in composed_signals - if not sig.samples.dtype.names or - sig.samples.dtype.names[0] == sig.name + if not sig.samples.dtype.names + or sig.samples.dtype.names[0] == sig.name ] if simple_signals or composed_signals: @@ -1037,13 +1071,12 @@ def append(self, gp['channel_extensions'] = gp_source = [] gp['channel_dependencies'] = gp_dep = [] gp['texts'] = gp_texts = { - 'channels': [], 'conversion_tab': [], 'channel_group': [], } self.groups.append(gp) - cycles_nr = len(t) + cycles_nr = len(timestamps) fields = [] types = [] parents = {} @@ -1051,27 +1084,22 @@ def append(self, offset = 0 field_names = set() - # setup all blocks related to the time master channel + cg_texts = {} + gp_texts['channel_group'].append(cg_texts) + if memory == 'minimum': + cg_texts['comment_addr'] = acq_address + else: + cg_texts['comment_addr'] = acq_block # time channel texts - for _, item in gp_texts.items(): - item.append(None) - - gp_texts['channel_group'][-1] = {} - block = TextBlock(text=acquisition_info) - if memory != 'minimum': - gp_texts['channel_group'][-1]['comment_addr'] = block - else: - address = tell() - gp_texts['channel_group'][-1]['comment_addr'] = address - write(bytes(block)) + gp_texts['conversion_tab'].append(None) # conversion for time channel kargs = { - 'conversion_type': v3c.CONVERSION_TYPE_NONE, + 'conversion_type': v23c.CONVERSION_TYPE_NONE, 'unit': b's', - 'min_phy_value': t[0] if cycles_nr else 0, - 'max_phy_value': t[-1] if cycles_nr else 0, + 'min_phy_value': timestamps[0] if cycles_nr else 0, + 'max_phy_value': timestamps[-1] if cycles_nr else 0, } block = ChannelConversion(**kargs) if memory != 'minimum': @@ -1082,30 +1110,22 @@ def append(self, write(bytes(block)) # source 
for time - kargs = { - 'module_nr': 0, - 'module_address': 0, - 'type': v3c.SOURCE_ECU, - 'description': b'Channel inserted by Python Script', - } - block = ChannelExtension(**kargs) if memory != 'minimum': - gp_source.append(block) + gp_source.append(ce_block) else: - address = tell() - gp_source.append(address) - write(bytes(block)) + gp_source.append(ce_address) # time channel - t_type, t_size = fmt_to_datatype(t.dtype) + t_type, t_size = fmt_to_datatype_v3(timestamps.dtype) kargs = { 'short_name': b't', - 'channel_type': v3c.CHANNEL_TYPE_MASTER, + 'channel_type': v23c.CHANNEL_TYPE_MASTER, 'data_type': t_type, 'start_offset': 0, - 'min_raw_value': t[0] if cycles_nr else 0, - 'max_raw_value': t[-1] if cycles_nr else 0, + 'min_raw_value': timestamps[0] if cycles_nr else 0, + 'max_raw_value': timestamps[-1] if cycles_nr else 0, 'bit_count': t_size, + 'block_len': channel_size, } channel = Channel(**kargs) channel.name = name = 't' @@ -1126,14 +1146,14 @@ def append(self, # time channel doesn't have channel dependencies gp_dep.append(None) - fields.append(t) - types.append((name, t.dtype)) + fields.append(timestamps) + types.append((name, timestamps.dtype)) field_names.add(name) offset += t_size ch_cntr += 1 - if self._compact_integers_on_append: + if self._compact_integers_on_append and self.version >= '3.10': compacted_signals = [ {'signal': sig} for sig in simple_signals @@ -1153,6 +1173,7 @@ def append(self, int(max_).bit_length(), int(min_).bit_length(), ) + bit_length += 1 signal['bit_count'] = max(minimum_bitlength, bit_length) @@ -1205,26 +1226,12 @@ def append(self, max_val = signal_d['max'] name = signal.name - for _, item in gp['texts'].items(): - item.append(None) - - texts = {} - if len(name) >= 32: - block = TextBlock(text=name) - if memory != 'minimum': - texts['long_name_addr'] = block - else: - address = tell() - texts['long_name_addr'] = address - write(bytes(block)) - if texts: - gp_texts['channels'][-1] = texts texts = {} info = signal.info - if info and 'raw' in info and not info['raw'].dtype.kind == 'S': + if info and 'raw' in info and info['raw'].dtype.kind != 'S': kargs = {} - kargs['conversion_type'] = v3c.CONVERSION_TYPE_VTAB + kargs['conversion_type'] = v23c.CONVERSION_TYPE_VTAB raw = info['raw'] phys = info['phys'] for i, (r_, p_) in enumerate(zip(raw, phys)): @@ -1234,14 +1241,14 @@ def append(self, kargs['unit'] = signal.unit.encode('latin-1') elif info and 'lower' in info: kargs = {} - kargs['conversion_type'] = v3c.CONVERSION_TYPE_VTABR + kargs['conversion_type'] = v23c.CONVERSION_TYPE_VTABR lower = info['lower'] upper = info['upper'] - texts = info['phys'] + texts_ = info['phys'] kargs['unit'] = signal.unit.encode('latin-1') kargs['ref_param_nr'] = len(upper) - for i, vals in enumerate(zip(upper, lower, texts)): + for i, vals in enumerate(zip(upper, lower, texts_)): u_, l_, t_ = vals kargs['lower_{}'.format(i)] = l_ kargs['upper_{}'.format(i)] = u_ @@ -1255,7 +1262,6 @@ def append(self, address = tell() texts[key] = address write(bytes(block)) - else: if min_val <= max_val: min_phy_value = min_val @@ -1264,14 +1270,16 @@ def append(self, min_phy_value = 0 max_phy_value = 0 kargs = { - 'conversion_type': v3c.CONVERSION_TYPE_NONE, + 'conversion_type': v23c.CONVERSION_TYPE_NONE, 'unit': signal.unit.encode('latin-1'), 'min_phy_value': min_phy_value, 'max_phy_value': max_phy_value, } if texts: - gp_texts['conversion_tab'][-1] = texts + gp_texts['conversion_tab'].append(texts) + else: + gp_texts['conversion_tab'].append(None) block = ChannelConversion(**kargs) if 
memory != 'minimum': @@ -1282,25 +1290,16 @@ def append(self, write(bytes(block)) # source for channel - kargs = { - 'module_nr': 0, - 'module_address': 0, - 'type': v3c.SOURCE_ECU, - 'description': b'Channel inserted by Python Script', - } - block = ChannelExtension(**kargs) if memory != 'minimum': - gp_source.append(block) + gp_source.append(ce_block) else: - address = tell() - gp_source.append(address) - write(bytes(block)) + gp_source.append(ce_address) # compute additional byte offset for large records size current_offset = offset + bit_offset - if current_offset > v3c.MAX_UINT16: + if current_offset > v23c.MAX_UINT16: additional_byte_offset = \ - (current_offset - v3c.MAX_UINT16) >> 3 + (current_offset - v23c.MAX_UINT16) >> 3 start_bit_offset = \ current_offset - additional_byte_offset << 3 else: @@ -1308,47 +1307,54 @@ def append(self, additional_byte_offset = 0 if signal.samples.dtype.kind == 'u': - data_type = v3c.DATA_TYPE_UNSIGNED_INTEL + data_type = v23c.DATA_TYPE_UNSIGNED else: - data_type = v3c.DATA_TYPE_SIGNED_INTEL + data_type = v23c.DATA_TYPE_SIGNED - texts = {} - if len(name) >= 32: - short_name = (name[:31] + '\0').encode('latin-1') - if memory != 'minimum': - texts['long_name_addr'] = TextBlock(texts=name) + if memory == 'minimum' and len(name) >= 32 and self.version >= '2.10': + block = TextBlock(text=name) + long_name_address = tell() + write(bytes(block)) + else: + long_name_address = 0 + comment = signal.comment + if comment: + if len(comment) >= 128: + description = b'\0' + if memory == 'minimum': + block = TextBlock(text=comment) + comment_address = tell() + write(bytes(block)) + else: + comment_address = 0 else: - address = tell() - texts['long_name_addr'] = address - block = TextBlock(texts=name) - gp_channels.append(address) - write(bytes(block)) + description = (comment[:127] + '\0').encode('latin-1') + comment_address = 0 else: - short_name = name.encode('latin-1') - - if texts: - gp_texts['channels'][-1] = texts + description = b'\0' + comment_address = 0 + short_name = (name[:31] + '\0').encode('latin-1') kargs = { 'short_name': short_name, - 'channel_type': v3c.CHANNEL_TYPE_VALUE, + 'channel_type': v23c.CHANNEL_TYPE_VALUE, 'data_type': data_type, 'min_raw_value': min_val if min_val <= max_val else 0, 'max_raw_value': max_val if min_val <= max_val else 0, 'start_offset': start_bit_offset, 'bit_count': bit_count, 'aditional_byte_offset': additional_byte_offset, + 'long_name_addr': long_name_address, + 'block_len': channel_size, + 'comment_addr': comment_address, + 'description': description, } - comment = signal.comment - if comment: - comment = comment.encode('latin-1') - if len(comment) >= 128: - comment = comment[:127] + b'\0' - kargs['description'] = comment channel = Channel(**kargs) - channel.name = name + if memory != 'minimum': + channel.name = name + channel.comment = signal.comment gp_channels.append(channel) else: address = tell() @@ -1375,31 +1381,15 @@ def append(self, # first add the signals in the simple signal list for signal in simple_signals: - # channels texts name = signal.name - for _, item in gp['texts'].items(): - item.append(None) - - texts = {} - if len(name) >= 32: - block = TextBlock(text=name) - if memory != 'minimum': - texts['long_name_addr'] = block - else: - address = tell() - texts['long_name_addr'] = address - write(bytes(block)) - if texts: - gp_texts['channels'][-1] = texts - # conversions for channel min_val, max_val = get_min_max(signal.samples) texts = {} info = signal.info - if info and 'raw' in info and not 
info['raw'].dtype.kind == 'S': + if info and 'raw' in info and info['raw'].dtype.kind != 'S': kargs = {} - kargs['conversion_type'] = v3c.CONVERSION_TYPE_VTAB + kargs['conversion_type'] = v23c.CONVERSION_TYPE_VTAB raw = info['raw'] phys = info['phys'] for i, (r_, p_) in enumerate(zip(raw, phys)): @@ -1409,7 +1399,7 @@ def append(self, kargs['unit'] = signal.unit.encode('latin-1') elif info and 'lower' in info: kargs = {} - kargs['conversion_type'] = v3c.CONVERSION_TYPE_VTABR + kargs['conversion_type'] = v23c.CONVERSION_TYPE_VTABR lower = info['lower'] upper = info['upper'] texts_ = info['phys'] @@ -1429,7 +1419,6 @@ def append(self, address = tell() texts[key] = address write(bytes(block)) - else: if min_val <= max_val: min_phy_value = min_val @@ -1438,14 +1427,16 @@ def append(self, min_phy_value = 0 max_phy_value = 0 kargs = { - 'conversion_type': v3c.CONVERSION_TYPE_NONE, + 'conversion_type': v23c.CONVERSION_TYPE_NONE, 'unit': signal.unit.encode('latin-1'), 'min_phy_value': min_phy_value, 'max_phy_value': max_phy_value, } if texts: - gp_texts['conversion_tab'][-1] = texts + gp_texts['conversion_tab'].append(texts) + else: + gp_texts['conversion_tab'].append(None) block = ChannelConversion(**kargs) if memory != 'minimum': @@ -1456,54 +1447,63 @@ def append(self, write(bytes(block)) # source for channel - kargs = { - 'module_nr': 0, - 'module_address': 0, - 'type': v3c.SOURCE_ECU, - 'description': b'Channel inserted by Python Script', - } - block = ChannelExtension(**kargs) if memory != 'minimum': - gp_source.append(block) + gp_source.append(ce_block) else: - address = tell() - gp_source.append(address) - write(bytes(block)) + gp_source.append(ce_address) # compute additional byte offset for large records size - if offset > v3c.MAX_UINT16: - additional_byte_offset = (offset - v3c.MAX_UINT16) >> 3 + if offset > v23c.MAX_UINT16: + additional_byte_offset = (offset - v23c.MAX_UINT16) >> 3 start_bit_offset = offset - additional_byte_offset << 3 else: start_bit_offset = offset additional_byte_offset = 0 - s_type, s_size = fmt_to_datatype(signal.samples.dtype) + s_type, s_size = fmt_to_datatype_v3(signal.samples.dtype) - if len(name) >= 32: - short_name = (name[:31] + '\0').encode('latin-1') + if memory == 'minimum' and len(name) >= 32 and self.version >= '2.10': + block = TextBlock(text=name) + long_name_address = tell() + write(bytes(block)) + else: + long_name_address = 0 + comment = signal.comment + if comment: + if len(comment) >= 128: + description = b'\0' + if memory == 'minimum': + block = TextBlock(text=comment) + comment_address = tell() + write(bytes(block)) + else: + comment_address = 0 + else: + description = (comment[:127] + '\0').encode('latin-1') + comment_address = 0 else: - short_name = name.encode('latin-1') + description = b'\0' + comment_address = 0 + short_name = (name[:31] + '\0').encode('latin-1') + kargs = { 'short_name': short_name, - 'channel_type': v3c.CHANNEL_TYPE_VALUE, + 'channel_type': v23c.CHANNEL_TYPE_VALUE, 'data_type': s_type, 'min_raw_value': min_val if min_val <= max_val else 0, 'max_raw_value': max_val if min_val <= max_val else 0, 'start_offset': start_bit_offset, 'bit_count': s_size, 'aditional_byte_offset': additional_byte_offset, + 'long_name_addr': long_name_address, + 'block_len': channel_size, + 'comment_addr': comment_address, + 'description': description, } - comment = signal.comment - if comment: - if len(comment) >= 128: - comment = (comment[:127] + '\0').encode('latin-1') - else: - comment = comment.encode('latin-1') - kargs['description'] = 
comment channel = Channel(**kargs) - channel.name = name if memory != 'minimum': + channel.name = name + channel.comment = signal.comment gp_channels.append(channel) else: address = tell() @@ -1566,21 +1566,7 @@ def append(self, component_samples.extend(new_samples) component_names.extend(names[1:]) - # add composed parent signal texts - for _, item in gp['texts'].items(): - item.append(None) - - texts = {} - if len(name) >= 32: - block = TextBlock(text=name) - if memory != 'minimum': - texts['long_name_addr'] = block - else: - address = tell() - texts['long_name_addr'] = address - write(bytes(block)) - if texts: - gp_texts['channels'][-1] = texts + gp_texts['conversion_tab'].append(None) # composed parent has no conversion if memory != 'minimum': @@ -1588,58 +1574,66 @@ def append(self, else: gp_conv.append(0) - # add parent and components sources - kargs = { - 'module_nr': 0, - 'module_address': 0, - 'type': v3c.SOURCE_ECU, - 'description': b'Channel inserted by Python Script', - } - block = ChannelExtension(**kargs) + # source for channel if memory != 'minimum': - gp_source.append(block) + gp_source.append(ce_block) else: - address = tell() - gp_source.append(address) - write(bytes(block)) + gp_source.append(ce_address) min_val, max_val = get_min_max(samples) - s_type, s_size = fmt_to_datatype(samples.dtype) + s_type, s_size = fmt_to_datatype_v3(samples.dtype) # compute additional byte offset for large records size - if offset > v3c.MAX_UINT16: - additional_byte_offset = (offset - v3c.MAX_UINT16) >> 3 + if offset > v23c.MAX_UINT16: + additional_byte_offset = (offset - v23c.MAX_UINT16) >> 3 start_bit_offset = offset - additional_byte_offset << 3 else: start_bit_offset = offset additional_byte_offset = 0 - if len(name) >= 32: - short_name = (name[:31] + '\0').encode('latin-1') + if memory == 'minimum' and len(name) >= 32 and self.version >= '2.10': + block = TextBlock(text=name) + long_name_address = tell() + write(bytes(block)) + else: + long_name_address = 0 + comment = signal.comment + if comment: + if len(comment) >= 128: + description = b'\0' + if memory == 'minimum': + block = TextBlock(text=comment) + comment_address = tell() + write(bytes(block)) + else: + comment_address = 0 + else: + description = (comment[:127] + '\0').encode('latin-1') + comment_address = 0 else: - short_name = name.encode('latin-1') + description = b'\0' + comment_address = 0 + short_name = (name[:31] + '\0').encode('latin-1') + kargs = { 'short_name': short_name, - 'channel_type': v3c.CHANNEL_TYPE_VALUE, + 'channel_type': v23c.CHANNEL_TYPE_VALUE, 'data_type': s_type, 'min_raw_value': min_val if min_val <= max_val else 0, 'max_raw_value': max_val if min_val <= max_val else 0, 'start_offset': start_bit_offset, 'bit_count': s_size, 'aditional_byte_offset': additional_byte_offset, + 'long_name_addr': long_name_address, + 'block_len': channel_size, + 'comment_addr': comment_address, + 'description': description, } - comment = signal.comment - if comment: - if len(comment) >= 128: - comment = (comment[:127] + '\0').encode('latin-1') - else: - comment = comment.encode('latin-1') - kargs['description'] = comment channel = Channel(**kargs) - channel.name = name - channel.name = name if memory != 'minimum': + channel.name = name + channel.comment = signal.comment gp_channels.append(channel) else: address = tell() @@ -1652,40 +1646,35 @@ def append(self, ch_cntr += 1 - for i, (name, samples) in enumerate(zip(component_names, - component_samples)): - for _, item in gp['texts'].items(): - item.append(None) + for i, 
(name, samples) in enumerate( + zip(component_names, component_samples)): + gp_texts['conversion_tab'].append(None) - texts = {} - if len(name) >= 32: + if memory == 'minimum' and len(name) >= 32 and self.version >= '2.10': block = TextBlock(text=name) - if memory != 'minimum': - texts['long_name_addr'] = block - else: - address = tell() - texts['long_name_addr'] = address - write(bytes(block)) - if texts: - gp_texts['channels'][-1] = texts + long_name_address = tell() + write(bytes(block)) + else: + long_name_address = 0 + if i < sd_nr: + dep_pair = ch_cntr, dg_cntr + parent_dep.referenced_channels.append(dep_pair) + description = b'\0' + else: + description = '{} - axis {}'.format(signal.name, name) + description = description.encode('latin-1') + comment_address = 0 + short_name = (name[:31] + '\0').encode('latin-1') min_val, max_val = get_min_max(samples) - s_type, s_size = fmt_to_datatype(samples.dtype) + s_type, s_size = fmt_to_datatype_v3(samples.dtype) shape = samples.shape[1:] - kargs = { - 'module_nr': 0, - 'module_address': 0, - 'type': v3c.SOURCE_ECU, - 'description': b'Channel inserted by Python Script', - } - block = ChannelExtension(**kargs) + # source for channel if memory != 'minimum': - gp_source.append(block) + gp_source.append(ce_block) else: - address = tell() - gp_source.append(address) - write(bytes(block)) + gp_source.append(ce_address) if memory != 'minimum': gp_conv.append(None) @@ -1693,36 +1682,37 @@ def append(self, gp_conv.append(0) # compute additional byte offset for large records size - if offset > v3c.MAX_UINT16: - additional_byte_offset = (offset - v3c.MAX_UINT16) >> 3 + if offset > v23c.MAX_UINT16: + additional_byte_offset = (offset - v23c.MAX_UINT16) >> 3 start_bit_offset = offset - additional_byte_offset << 3 else: start_bit_offset = offset additional_byte_offset = 0 - if len(name) >= 32: - short_name = (name[:31] + '\0').encode('latin-1') - else: - short_name = name.encode('latin-1') kargs = { 'short_name': short_name, - 'channel_type': v3c.CHANNEL_TYPE_VALUE, + 'channel_type': v23c.CHANNEL_TYPE_VALUE, 'data_type': s_type, 'min_raw_value': min_val if min_val <= max_val else 0, 'max_raw_value': max_val if min_val <= max_val else 0, 'start_offset': start_bit_offset, 'bit_count': s_size, 'aditional_byte_offset': additional_byte_offset, + 'long_name_addr': long_name_address, + 'block_len': channel_size, + 'comment_addr': comment_address, + 'description': description, } channel = Channel(**kargs) - channel.name = name if memory != 'minimum': + channel.name = name gp_channels.append(channel) else: address = tell() gp_channels.append(address) write(bytes(channel)) + size = s_size for dim in shape: size *= dim @@ -1742,14 +1732,6 @@ def append(self, gp_dep.append(None) - if i < sd_nr: - dep_pair = ch_cntr, dg_cntr - parent_dep.referenced_channels.append(dep_pair) - else: - description = '{} - axis {}'.format(signal.name, name) - description = description.encode('latin-1') - channel['description'] = description - ch_cntr += 1 # channel group @@ -1762,10 +1744,10 @@ def append(self, gp['size'] = cycles_nr * (offset >> 3) # data group - if self.version in ('3.20', '3.30'): - block_len = v3c.DG32_BLOCK_SIZE + if self.version >= '3.20': + block_len = v23c.DG_POST_320_BLOCK_SIZE else: - block_len = v3c.DG31_BLOCK_SIZE + block_len = v23c.DG_PRE_320_BLOCK_SIZE gp['data_group'] = DataGroup(block_len=block_len) # data block @@ -1781,11 +1763,11 @@ def append(self, block = samples.tostring() if memory == 'full': - gp['data_location'] = v3c.LOCATION_MEMORY + 
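The `+` line that resumes below is the point where the assembled record block is either kept in RAM (`memory='full'`) or spilled to the scratch file. A minimal sketch of that storage split, with illustrative names (`store_record_block` and the dict keys are not the library's own identifiers):

```python
from tempfile import TemporaryFile

def store_record_block(group, block, memory, scratch):
    """Keep raw record bytes in RAM for 'full', else spill to a temp file."""
    if memory == 'full':
        group['data_block'] = block
    else:
        # remember where the bytes landed so they can be re-read on demand
        group['data_block_addr'] = scratch.tell()
        group['size'] = len(block)
        scratch.write(block)

scratch = TemporaryFile()
store_record_block({}, b'\x00' * 16, 'low', scratch)
```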
gp['data_location'] = v23c.LOCATION_MEMORY
                 kargs = {'data': block}
                 gp['data_block'] = DataBlock(**kargs)
             else:
-                gp['data_location'] = v3c.LOCATION_TEMPORARY_FILE
+                gp['data_location'] = v23c.LOCATION_TEMPORARY_FILE
                 if cycles_nr:
                     data_address = tell()
                     gp['data_group']['data_block_addr'] = data_address
@@ -1804,13 +1786,12 @@ def append(self,
             gp['channel_extensions'] = gp_source = []
             gp['channel_dependencies'] = gp_dep = []
             gp['texts'] = gp_texts = {
-                'channels': [],
                 'conversion_tab': [],
                 'channel_group': [],
             }
             self.groups.append(gp)

-            cycles_nr = len(t)
+            cycles_nr = len(timestamps)
             fields = []
             types = []
             parents = {}
@@ -1818,52 +1799,41 @@ def append(self,
             offset = 0
             field_names = set()

-            # setup all blocks related to the time master channel
-            # time channel texts
-            for _, item in gp_texts.items():
-                item.append(None)
+            gp_texts['conversion_tab'].append(None)

             # conversion for time channel
             kargs = {
-                'conversion_type': v3c.CONVERSION_TYPE_NONE,
+                'conversion_type': v23c.CONVERSION_TYPE_NONE,
                 'unit': b's',
-                'min_phy_value': t[0] if cycles_nr else 0,
-                'max_phy_value': t[-1] if cycles_nr else 0,
+                'min_phy_value': timestamps[0] if cycles_nr else 0,
+                'max_phy_value': timestamps[-1] if cycles_nr else 0,
             }
             block = ChannelConversion(**kargs)
-            if memory == 'minimum':
+            if memory != 'minimum':
+                gp_conv.append(block)
+            else:
                 address = tell()
-                write(bytes(block))
                 gp_conv.append(address)
-            else:
-                gp_conv.append(ChannelConversion(**kargs))
+                write(bytes(block))

             # source for time
-            kargs = {
-                'module_nr': 0,
-                'module_address': 0,
-                'type': v3c.SOURCE_ECU,
-                'description': b'Channel inserted by Python Script',
-            }
-            block = ChannelExtension(**kargs)
             if memory != 'minimum':
-                gp_source.append(block)
+                gp_source.append(ce_block)
             else:
-                address = tell()
-                gp_source.append(address)
-                write(bytes(block))
+                gp_source.append(ce_address)

             # time channel
-            t_type, t_size = fmt_to_datatype(t.dtype)
+            t_type, t_size = fmt_to_datatype_v3(timestamps.dtype)
             kargs = {
                 'short_name': b't',
-                'channel_type': v3c.CHANNEL_TYPE_MASTER,
+                'channel_type': v23c.CHANNEL_TYPE_MASTER,
                 'data_type': t_type,
                 'start_offset': 0,
-                'min_raw_value': t[0] if cycles_nr else 0,
-                'max_raw_value': t[-1] if cycles_nr else 0,
+                'min_raw_value': timestamps[0] if cycles_nr else 0,
+                'max_raw_value': timestamps[-1] if cycles_nr else 0,
                 'bit_count': t_size,
+                'block_len': channel_size,
             }
             channel = Channel(**kargs)
             channel.name = name = 't'
@@ -1884,68 +1854,70 @@ def append(self,
             # time channel doesn't have channel dependencies
             gp_dep.append(None)

-            fields.append(t)
-            types.append((name, t.dtype))
+            fields.append(timestamps)
+            types.append((name, timestamps.dtype))
             field_names.add(name)

             offset += t_size
             ch_cntr += 1
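The `names` checks that follow recognise the CANopen TIME and DATE compositions coming from mdf version 4 files. For reference, a signal with the `('ms', 'days')` layout can be built like this (a hedged sketch — the field names come from the code above, the values and the channel name are illustrative):

```python
import numpy as np
from asammdf import Signal

# CANopen TIME composition: milliseconds since midnight plus a day counter
samples = np.rec.fromarrays(
    [
        np.array([1000, 2000, 3000], dtype='<u4'),  # 'ms'
        np.array([42, 42, 42], dtype='<u2'),        # 'days'
    ],
    names=['ms', 'days'],
)
sig = Signal(
    samples=samples,
    timestamps=np.arange(3, dtype='<f8'),
    name='CANopen_Time',  # illustrative name
)
# samples.dtype.names == ('ms', 'days') selects the first branch below
```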
             names = signal.samples.dtype.names
-            if names == ('ms',
-                         'days'):
-                block = TextBlock(text='From mdf v4 CANopen Time channel')
+            if names == (
+                    'ms',
+                    'days'):
+                acq_block = TextBlock(text='From mdf v4 CANopen Time channel')
                 if memory == 'minimum':
-                    address = tell()
-                    write(bytes(block))
+                    acq_address = tell()
+                    write(bytes(acq_block))
-                    gp_texts['channel_group'][-1] = {'comment_addr': address}
+                    gp_texts['channel_group'].append({'comment_addr': acq_address})
                 else:
-                    gp_texts['channel_group'][-1] = {'comment_addr': block}
+                    gp_texts['channel_group'].append({'comment_addr': acq_block})
-            elif names == ('ms',
-                           'min',
-                           'hour',
-                           'day',
-                           'month',
-                           'year',
-                           'summer_time',
-                           'day_of_week'):
-                block = TextBlock(text='From mdf v4 CANopen Date channel')
+            elif names == (
+                    'ms',
+                    'min',
+                    'hour',
+                    'day',
+                    'month',
+                    'year',
+                    'summer_time',
+                    'day_of_week'):
+                acq_block = TextBlock(text='From mdf v4 CANopen Date channel')
                 if memory == 'minimum':
-                    address = tell()
-                    write(bytes(block))
+                    acq_address = tell()
+                    write(bytes(acq_block))
-                    gp_texts['channel_group'][-1] = {'comment_addr': address}
+                    gp_texts['channel_group'].append({'comment_addr': acq_address})
                 else:
-                    gp_texts['channel_group'][-1] = {'comment_addr': block}
+                    gp_texts['channel_group'].append({'comment_addr': acq_block})
             else:
                 text = 'From mdf v4 structure channel composition'
-                block = TextBlock(text=text)
+                acq_block = TextBlock(text=text)
                 if memory == 'minimum':
-                    address = tell()
-                    write(bytes(block))
+                    acq_address = tell()
+                    write(bytes(acq_block))
-                    gp_texts['channel_group'][-1] = {'comment_addr': address}
+                    gp_texts['channel_group'].append({'comment_addr': acq_address})
                 else:
-                    gp_texts['channel_group'][-1] = {'comment_addr': block}
+                    gp_texts['channel_group'].append({'comment_addr': acq_block})

             for name in names:
                 samples = signal.samples[name]

-                # channels texts
-                for _, item in gp['texts'].items():
-                    item.append(None)
+                gp_texts['conversion_tab'].append(None)

-                texts = {}
-                if len(name) >= 32:
+                if memory == 'minimum' and len(name) >= 32 and self.version >= '2.10':
                     block = TextBlock(text=name)
-                    texts['long_name_addr'] = block
-                if texts:
-                    gp_texts['channels'][-1] = texts
+                    long_name_address = tell()
+                    write(bytes(block))
+                else:
+                    long_name_address = 0
+                comment_address = 0
+                short_name = (name[:31] + '\0').encode('latin-1')

                 # conversions for channel
                 min_val, max_val = get_min_max(samples)
                 kargs = {
-                    'conversion_type': v3c.CONVERSION_TYPE_NONE,
+                    'conversion_type': v23c.CONVERSION_TYPE_NONE,
                     'unit': signal.unit.encode('latin-1'),
                     'min_phy_value': min_val if min_val <= max_val else 0,
                     'max_phy_value': max_val if min_val <= max_val else 0,
@@ -1959,46 +1931,39 @@ def append(self,
                     write(bytes(block))

                 # source for channel
-                kargs = {
-                    'module_nr': 0,
-                    'module_address': 0,
-                    'type': v3c.SOURCE_ECU,
-                    'description': b'Channel inserted by Python Script',
-                }
-                block = ChannelExtension(**kargs)
                 if memory != 'minimum':
-                    gp_source.append(block)
+                    gp_source.append(ce_block)
                 else:
-                    address = tell()
-                    gp_source.append(address)
-                    write(bytes(block))
+                    gp_source.append(ce_address)

                 # compute additional byte offset for large records size
-                if offset > v3c.MAX_UINT16:
-                    additional_byte_offset = (offset - v3c.MAX_UINT16) >> 3
+                if offset > v23c.MAX_UINT16:
+                    additional_byte_offset = (offset - v23c.MAX_UINT16) >> 3
                     start_bit_offset = offset - additional_byte_offset << 3
                 else:
                     start_bit_offset = offset
                     additional_byte_offset = 0

-                s_type, s_size = fmt_to_datatype(samples.dtype)
-                if len(name) >= 32:
-                    short_name = (name[:31] + '\0').encode('latin-1')
-                else:
-                    short_name = name.encode('latin-1')
+                s_type, s_size = fmt_to_datatype_v3(samples.dtype)
+
                 kargs = {
                     'short_name': short_name,
-                    'channel_type': v3c.CHANNEL_TYPE_VALUE,
+                    'channel_type': v23c.CHANNEL_TYPE_VALUE,
                     'data_type': s_type,
                     'min_raw_value': min_val if min_val <= max_val else 0,
                     'max_raw_value': max_val if min_val <= max_val else 0,
                     'start_offset': start_bit_offset,
                     'bit_count': s_size,
                     'aditional_byte_offset': additional_byte_offset,
+                    'long_name_addr': long_name_address,
+                    'block_len': channel_size,
+                    'comment_addr': comment_address,
+                    'description': description,
                 }
                 channel = Channel(**kargs)
-                channel.name = name
+                if memory != 'minimum':
+                    channel.name = name
                     gp_channels.append(channel)
                 else:
                     address = tell()
@@ -2033,10 +1998,10 @@ def append(self,
             gp['size'] = cycles_nr * (offset >> 3)
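The bookkeeping line just above derives the raw data size from the bit offset accumulated while channels were appended. A toy check of that relation, with hypothetical numbers:

```python
# one float64 time channel (64 bits) plus one uint16 channel (16 bits)
offset = 64 + 16           # bits accumulated per record
cycles_nr = 1000           # samples per channel

samples_byte_nr = offset >> 3          # 10 bytes per record
size = cycles_nr * samples_byte_nr     # 10000 bytes of raw data

assert (samples_byte_nr, size) == (10, 10000)
```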
             # data group
-            if self.version in ('3.20', '3.30'):
-                block_len = v3c.DG32_BLOCK_SIZE
+            if self.version >= '3.20':
+                block_len = v23c.DG_POST_320_BLOCK_SIZE
             else:
-                block_len = v3c.DG31_BLOCK_SIZE
+                block_len = v23c.DG_PRE_320_BLOCK_SIZE
             gp['data_group'] = DataGroup(block_len=block_len)

             # data block
@@ -2053,11 +2018,11 @@ def append(self,
             block = samples.tostring()

             if memory == 'full':
-                gp['data_location'] = v3c.LOCATION_MEMORY
+                gp['data_location'] = v23c.LOCATION_MEMORY
                 kargs = {'data': block}
                 gp['data_block'] = DataBlock(**kargs)
             else:
-                gp['data_location'] = v3c.LOCATION_TEMPORARY_FILE
+                gp['data_location'] = v23c.LOCATION_TEMPORARY_FILE
                 if cycles_nr:
                     data_address = tell()
                     gp['data_group']['data_block_addr'] = data_address
@@ -2068,7 +2033,7 @@ def append(self,
             if memory == 'full':
                 raise
             else:
-                gp['data_location'] = v3c.LOCATION_TEMPORARY_FILE
+                gp['data_location'] = v23c.LOCATION_TEMPORARY_FILE

                 data_address = tell()
                 gp['data_group']['data_block_addr'] = data_address
@@ -2132,7 +2097,7 @@ def get_channel_unit(self, name=None, group=None, index=None):
         )

         grp = self.groups[gp_nr]
-        if grp['data_location'] == v3c.LOCATION_ORIGINAL_FILE:
+        if grp['data_location'] == v23c.LOCATION_ORIGINAL_FILE:
             stream = self._file
         else:
             stream = self._tempfile
@@ -2151,7 +2116,11 @@ def get_channel_unit(self, name=None, group=None, index=None):
         conversion = grp['channel_conversions'][ch_nr]
         if conversion:
-            unit = conversion['unit'].decode('latin-1').strip(' \n\t\0')
+            unit = (
+                conversion['unit']
+                .decode('latin-1')
+                .strip(' \n\t\0')
+            )
         else:
             unit = ''
@@ -2199,7 +2168,7 @@ def get_channel_comment(self, name=None, group=None, index=None):
         )

         grp = self.groups[gp_nr]
-        if grp['data_location'] == v3c.LOCATION_ORIGINAL_FILE:
+        if grp['data_location'] == v23c.LOCATION_ORIGINAL_FILE:
             stream = self._file
         else:
             stream = self._tempfile
@@ -2212,8 +2181,21 @@ def get_channel_comment(self, name=None, group=None, index=None):
         else:
             channel = grp['channels'][ch_nr]

-        comment = channel['description'].decode('latin-1')
-        comment = comment.strip(' \t\n\0')
+        if self.memory == 'minimum':
+            comment = ''
+            if channel['comment_addr']:
+                comment = get_text_v3(channel['comment_addr'], stream)
+        else:
+            comment = channel.comment
+        description = (
+            channel['description']
+            .decode('latin-1')
+            .strip(' \t\n\0')
+        )
+        if comment:
+            comment = '{}\n{}'.format(comment, description)
+        else:
+            comment = description

         return comment
@@ -2223,7 +2205,8 @@ def get(self,
             index=None,
             raster=None,
             samples_only=False,
-            data=None):
+            data=None,
+            raw=False):
         """Gets channel samples.
         Channel can be specified in two ways:
@@ -2256,6 +2239,12 @@ def get(self,
         samples_only : bool
             if *True* return only the channel samples as numpy array; if
             *False* return a *Signal* object
+        data : bytes
+            prevent redundant data read by providing the raw data group samples
+        raw : bool
+            return channel samples without applying the conversion rule; default
+            `False`
+

         Returns
         -------
@@ -2270,12 +2259,68 @@ def get(self,
         Raises
         ------
-        MdfError :
+        MdfException :
             * if the channel name is not found
             * if the group index is out of range
             * if the channel index is out of range

+        Examples
+        --------
+        >>> from asammdf import MDF, Signal
+        >>> import numpy as np
+        >>> t = np.arange(5)
+        >>> s = np.ones(5)
+        >>> mdf = MDF(version='3.30')
+        >>> for i in range(4):
+        ...     sigs = [Signal(s*(i*10+j), t, name='Sig') for j in range(1, 4)]
+        ...     mdf.append(sigs)
+        ...
+        >>> # first group and channel index of the specified channel name
+        ...
+        >>> mdf.get('Sig')
+        UserWarning: Multiple occurrences for channel "Sig". Using first occurrence from data group 4. Provide both "group" and "index" arguments to select another data group
+
+        >>> # first channel index in the specified group
+        ...
+        >>> mdf.get('Sig', 1)
+
+        >>> # channel named Sig from group 1 channel index 2
+        ...
+        >>> mdf.get('Sig', 1, 2)
+
+        >>> # channel index 1 of group 2
+        ...
+        >>> mdf.get(None, 2, 1)
+
+        >>> mdf.get(group=2, index=1)
+
+        """
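`get()` resolves its `(group, index)` pair through `_validate_channel_selection()`, whose rules changed above: an unknown group/index combination now raises instead of silently falling back. A condensed sketch of the lookup (the `channels_db` contents are illustrative):

```python
channels_db = {'Sig': [(0, 1), (1, 2), (2, 1)]}  # name -> (group, index) pairs

def resolve(name=None, group=None, index=None):
    if name is None:
        if group is None or index is None:
            raise ValueError('must give "name" or "group" and "index"')
        return group, index
    occurrences = channels_db[name]
    if group is None:
        return occurrences[0]       # first occurrence, with a warning
    for gp_nr, ch_nr in occurrences:
        if gp_nr == group and (index is None or index == ch_nr):
            return gp_nr, ch_nr
    raise LookupError('channel not found in the requested group')

assert resolve('Sig', group=1) == (1, 2)
```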
         gp_nr, ch_nr = self._validate_channel_selection(
             name,
@@ -2286,7 +2331,7 @@ def get(self,
         memory = self.memory
         grp = self.groups[gp_nr]
-        if grp['data_location'] == v3c.LOCATION_ORIGINAL_FILE:
+        if grp['data_location'] == v23c.LOCATION_ORIGINAL_FILE:
             stream = self._file
         else:
             stream = self._tempfile
@@ -2312,16 +2357,15 @@ def get(self,
             else:
                 conversion = None
             if name is None:
-                if channel['long_name_addr']:
-                    name = TextBlock(
-                        address=channel['long_name_addr'],
-                        stream=stream,
-                    )
-                    name = name['text']
+                if channel.get('long_name_addr', 0):
+                    name = get_text_v3(channel['long_name_addr'], stream)
                 else:
-                    name = channel['short_name']
-                name = name.decode('utf-8').strip(' \r\t\n\0')
-                name = name.split('\\')[0]
+                    name = (
+                        channel['short_name']
+                        .decode('latin-1')
+                        .strip(' \n\t\0')
+                        .split('\\')[0]
+                    )
             channel.name = name

         dep = grp['channel_dependencies'][ch_nr]
@@ -2335,9 +2379,9 @@ def get(self,
         # check if this is a channel array
         if dep:
-            if dep['dependency_type'] == v3c.DEPENDENCY_TYPE_VECTOR:
+            if dep['dependency_type'] == v23c.DEPENDENCY_TYPE_VECTOR:
                 shape = [dep['sd_nr'], ]
-            elif dep['dependency_type'] >= v3c.DEPENDENCY_TYPE_NDIM:
+            elif dep['dependency_type'] >= v23c.DEPENDENCY_TYPE_NDIM:
                 shape = []
                 i = 0
                 while True:
@@ -2352,7 +2396,7 @@ def get(self,
             record_shape = tuple(shape)

             arrays = [
-                self.get(group=dg_nr, index=ch_nr, samples_only=True)
+                self.get(group=dg_nr, index=ch_nr, samples_only=True, raw=raw)
                 for ch_nr, dg_nr in dep.referenced_channels
             ]
             if cycles_nr:
@@ -2411,30 +2455,31 @@ def get(self,
                 vals = vals >> bit_offset

                 if not bits == size * 8:
-                    mask = (1 << bits) - 1
-                    if vals.flags.writeable:
-                        vals &= mask
+                    if data_type in v23c.SIGNED_INT:
+                        vals = as_non_byte_sized_signed_int(vals, bits)
                     else:
-                        vals = vals & mask
-                    if data_type in v3c.SIGNED_INT:
-                        size = vals.dtype.itemsize
-                        mask = (1 << (size * 8)) - 1
-                        mask = (mask << bits) & mask
-                        vals |= mask
-                        vals = vals.astype('', were
-        '' is the first conter that produces a new file name (that does
-        not already exist in the filesystem).
+        is overwritten, otherwise the file name is appended with '_<cntr>',
+        where '<cntr>' is the first counter that produces a new file name (that
+        does not already exist in the filesystem).

         Parameters
         ----------
@@ -2801,43 +2893,43 @@
             does nothing for mdf version3; introduced here to share the same
             API as mdf version 4 files

+        Returns
+        -------
+        output_file : str
+            output file name
+
         """
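The overwrite handling that follows implements the `'_<cntr>'` naming scheme from the docstring; condensed into a standalone helper for illustration (same logic, hypothetical function name):

```python
import os

def next_free_name(dst):
    """Return dst, or dst with the first unused '_<cntr>' suffix appended."""
    if not os.path.isfile(dst):
        return dst
    cntr = 0
    while True:
        name = os.path.splitext(dst)[0] + '_{}.mdf'.format(cntr)
        if not os.path.isfile(name):
            return name
        cntr += 1
```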
         if overwrite is None:
             overwrite = self._overwrite
+        output_file = ''

         if self.name is None and dst == '':
             message = ('Must specify a destination file name '
                        'for MDF created from scratch')
             raise MdfException(message)

-        dst = dst if dst else self.name
-        if overwrite is False:
-            if os.path.isfile(dst):
-                cntr = 0
-                while True:
-                    name = os.path.splitext(dst)[0] + '_{}.mdf'.format(cntr)
-                    if not os.path.isfile(name):
-                        break
-                    else:
-                        cntr += 1
-                message = ('Destination file "{}" already exists '
-                           'and "overwrite" is False. Saving MDF file as "{}"')
-                message = message.format(dst, name)
-                warnings.warn(message)
-                dst = name
-
-        if self.memory != 'minimum':
-            self._save_with_metadata(dst, overwrite, compression)
+        if self.memory == 'minimum':
+            output_file = self._save_without_metadata(
+                dst,
+                overwrite,
+                compression,
+            )
         else:
-            self._save_without_metadata(dst, overwrite, compression)
+            output_file = self._save_with_metadata(
+                dst,
+                overwrite,
+                compression,
+            )
+
+        return output_file

     def _save_with_metadata(self, dst, overwrite, compression):
         """Save MDF to *dst*. If *dst* is not provided then the destination
         file name is the MDF name. If overwrite is *True* then the destination
         file
-        is overwritten, otherwise the file name is appended with '_', were
-        '' is the first counter that produces a new file name (that does
-        not already exist in the filesystem).
+        is overwritten, otherwise the file name is appended with '_<cntr>',
+        where '<cntr>' is the first counter that produces a new file name (that
+        does not already exist in the filesystem).

         Parameters
         ----------
@@ -2850,6 +2942,7 @@ def _save_with_metadata(self, dst, overwrite, compression):
             API as mdf version 4 files

         """
+        # pylint: disable=unused-argument
         if self.file_history is None:
             self.file_history = TextBlock(text='''
@@ -2871,11 +2964,15 @@ def _save_with_metadata(self, dst, overwrite, compression):
             self.file_history = TextBlock(text=text)

         if self.name is None and dst == '':
-            message = ('Must specify a destination file name '
-                       'for MDF created from scratch')
+            message = (
+                'Must specify a destination file name '
+                'for MDF created from scratch'
+            )
             raise MdfException(message)

         dst = dst if dst else self.name
+        if not dst.endswith(('mdf', 'MDF')):
+            dst = dst + '.mdf'
         if overwrite is False:
             if os.path.isfile(dst):
                 cntr = 0
@@ -2885,8 +2982,10 @@ def _save_with_metadata(self, dst, overwrite, compression):
                         break
                     else:
                         cntr += 1
-                message = ('Destination file "{}" already exists '
-                           'and "overwrite" is False. Saving MDF file as "{}"')
+                message = (
+                    'Destination file "{}" already exists '
+                    'and "overwrite" is False. 
Saving MDF file as "{}"' + ) message = message.format(dst, name) warnings.warn(message) dst = name @@ -2921,7 +3020,7 @@ def _save_with_metadata(self, dst, overwrite, compression): address = 0 blocks.append(self.identification) - address += v3c.ID_BLOCK_SIZE + address += v23c.ID_BLOCK_SIZE blocks.append(self.header) address += self.header['block_len'] @@ -2930,23 +3029,35 @@ def _save_with_metadata(self, dst, overwrite, compression): blocks.append(self.file_history) address += self.file_history['block_len'] + ce_map = {} + cc_map = {} + # DataGroup # put them first in the block list so they will be written first to # disk this way, in case of memory=False, we can safely # restore he original data block address + gp_rec_ids = [] + + original_data_block_addrs = [ + group['data_group']['data_block_addr'] + for group in self.groups + ] + for gp in self.groups: dg = gp['data_group'] + gp_rec_ids.append(dg['record_id_nr']) + dg['record_id_nr'] = 0 blocks.append(dg) dg.address = address address += dg['block_len'] if self.groups: for i, dg in enumerate(self.groups[:-1]): - addr = self.groups[i+1]['data_group'].address + addr = self.groups[i + 1]['data_group'].address dg['data_group']['next_dg_addr'] = addr self.groups[-1]['data_group']['next_dg_addr'] = 0 - for gp in self.groups: + for idx, gp in enumerate(self.groups): gp_texts = gp['texts'] # Texts @@ -2971,22 +3082,32 @@ def _save_with_metadata(self, dst, overwrite, compression): if conv is None: continue - conv.address = address - if conv['conversion_type'] == v3c.CONVERSION_TYPE_VTABR: + if conv['conversion_type'] == v23c.CONVERSION_TYPE_VTABR: + conv.address = address pairs = gp_texts['conversion_tab'][i].items() for key, item in pairs: conv[key] = item.address - blocks.append(conv) - address += conv['block_len'] + blocks.append(conv) + address += conv['block_len'] + else: + cc_id = id(conv) + if cc_id not in cc_map: + conv.address = address + cc_map[cc_id] = conv + blocks.append(conv) + address += conv['block_len'] # Channel Extension cs = gp['channel_extensions'] for source in cs: if source: - source.address = address - blocks.append(source) - address += source['block_len'] + source_id = id(source) + if source_id not in ce_map: + source.address = address + ce_map[source_id] = source + blocks.append(source) + address += source['block_len'] # Channel Dependency cd = gp['channel_dependencies'] @@ -2997,22 +3118,56 @@ def _save_with_metadata(self, dst, overwrite, compression): address += dep['block_len'] # Channels - ch_texts = gp_texts['channels'] for i, channel in enumerate(gp['channels']): channel.address = address - channel_texts = ch_texts[i] - blocks.append(channel) - address += v3c.CN_BLOCK_SIZE - - if channel_texts: - for key in ('long_name_addr', - 'comment_addr', - 'display_name_addr'): - if key in channel_texts: - channel[key] = channel_texts[key].address + address += channel['block_len'] + + comment = channel.comment + if comment: + if len(comment) >= 128: + channel['description'] = b'\0' + tx_block = TextBlock(text=comment) + text = tx_block['text'] + if text in defined_texts: + channel['comment_addr'] = defined_texts[text].address else: - channel[key] = 0 + channel['comment_addr'] = address + defined_texts[text] = tx_block + tx_block.address = address + blocks.append(tx_block) + address += tx_block['block_len'] + else: + channel['description'] = (comment[:127] + '\0').encode('latin-1') + channel['comment_addr'] = 0 + if self.version >= '2.10': + if len(channel.name) >= 32: + tx_block = TextBlock(text=channel.name) + text = 
tx_block['text'] + if text in defined_texts: + channel['long_name_addr'] = defined_texts[text].address + else: + channel['long_name_addr'] = address + defined_texts[text] = tx_block + tx_block.address = address + blocks.append(tx_block) + address += tx_block['block_len'] + else: + channel['long_name_addr'] = 0 + if 'display_name_addr' in channel: + if channel.display_name: + tx_block = TextBlock(text=channel.display_name) + text = tx_block['text'] + if text in defined_texts: + channel['display_name_addr'] = defined_texts[text].address + else: + channel['display_name_addr'] = address + defined_texts[text] = tx_block + tx_block.address = address + blocks.append(tx_block) + address += tx_block['block_len'] + else: + channel['display_name_addr'] = 0 channel['conversion_addr'] = cc[i].address if cc[i] else 0 if cs[i]: @@ -3024,9 +3179,11 @@ def _save_with_metadata(self, dst, overwrite, compression): else: channel['ch_depend_addr'] = 0 - for channel, next_channel in pair(gp['channels']): - channel['next_ch_addr'] = next_channel.address - next_channel['next_ch_addr'] = 0 + count = len(gp['channels']) + if count: + for i in range(count-1): + gp['channels'][i]['next_ch_addr'] = gp['channels'][i+1].address + gp['channels'][-1]['next_ch_addr'] = 0 # ChannelGroup cg = gp['channel_group'] @@ -3048,30 +3205,25 @@ def _save_with_metadata(self, dst, overwrite, compression): trigger_text.address = address blocks.append(trigger_text) address += trigger_text['block_len'] - trigger['comment_addr'] = trigger_text.address + trigger['text_addr'] = trigger_text.address else: - trigger['comment_addr'] = 0 + trigger['text_addr'] = 0 trigger.address = address blocks.append(trigger) address += trigger['block_len'] # DataBlock - original_data_addr = gp['data_group']['data_block_addr'] + if self.memory == 'full': + blocks.append(gp['data_block']) + else: + blocks.append(self._load_group_data(gp)) + if gp['size']: gp['data_group']['data_block_addr'] = address else: gp['data_group']['data_block_addr'] = 0 - address += gp['size'] - if self.memory == 'full': - blocks.append(gp['data_block']) - else: - # trying to call bytes([gp, address]) will result in an - # exceptionthat be used as a flag for non existing data - # block in caseof memory=False, the address is - # the actual addressof the data group's data within the - # original file - blocks.append([gp, original_data_addr]) + address += gp['size'] - gp_rec_ids[idx] * gp['channel_group']['cycles_nr'] # update referenced channels addresses in the channel dependecies for gp in self.groups: @@ -3103,17 +3255,14 @@ def _save_with_metadata(self, dst, overwrite, compression): self.header['program_addr'] = 0 for block in blocks: - try: - write(bytes(block)) - except: - # this will only be executed for data blocks when - # memory=False - gp, address = block - # restore data block address from original file so that - # future calls to get will still work after the save - gp['data_group']['data_block_addr'] = address - data = self._load_group_data(gp) - write(data) + write(bytes(block)) + + for gp, rec_id, original_address in zip( + self.groups, + gp_rec_ids, + original_data_block_addrs): + gp['data_group']['record_id_nr'] = rec_id + gp['data_group']['data_block_addr'] = original_address if self.memory == 'low' and dst == self.name: self.close() @@ -3129,19 +3278,19 @@ def _save_with_metadata(self, dst, overwrite, compression): self.attachments = [] self.file_comment = None - self._ch_map = {} self._master_channel_cache = {} self._tempfile = TemporaryFile() self._file = 
open(self.name, 'rb')
             self._read()
+        return dst

     def _save_without_metadata(self, dst, overwrite, compression):
         """Save MDF to *dst*. If *dst* is not provided then the destination
         file name is the MDF name. If overwrite is *True* then the destination
         file
-        is overwritten, otherwise the file name is appended with '_', were
-        '' is the first counter that produces a new file name (that does
-        not already exist in the filesystem).
+        is overwritten, otherwise the file name is appended with '_<cntr>',
+        where '<cntr>' is the first counter that produces a new file name (that
+        does not already exist in the filesystem).

         Parameters
         ----------
@@ -3154,6 +3303,7 @@ def _save_without_metadata(self, dst, overwrite, compression):
             API as mdf version 4 files

         """
+        # pylint: disable=unused-argument
         if self.file_history is None:
             self.file_history = TextBlock(text='''
@@ -3189,6 +3339,33 @@ def _save_without_metadata(self, dst, overwrite, compression):
        # the reference to the data group object and the original link to the
        # data block in the source MDF file.

+        if self.name is None and dst == '':
+            message = (
+                'Must specify a destination file name '
+                'for MDF created from scratch'
+            )
+            raise MdfException(message)
+
+        dst = dst if dst else self.name
+        if not dst.endswith(('mdf', 'MDF')):
+            dst = dst + '.mdf'
+        if overwrite is False:
+            if os.path.isfile(dst):
+                cntr = 0
+                while True:
+                    name = os.path.splitext(dst)[0] + '_{}.mdf'.format(cntr)
+                    if not os.path.isfile(name):
+                        break
+                    else:
+                        cntr += 1
+                message = (
+                    'Destination file "{}" already exists '
+                    'and "overwrite" is False. Saving MDF file as "{}"'
+                )
+                message = message.format(dst, name)
+                warnings.warn(message)
+                dst = name
+
         if dst == self.name:
             destination = dst + '.temp'
         else:
@@ -3220,9 +3397,17 @@ def _save_without_metadata(self, dst, overwrite, compression):
         data_address = []

+        ce_map = {}
+        cc_map = {}
+
         for gp in self.groups:
+            gp['temp_channels'] = ch_addrs = []
+            gp['temp_channel_conversions'] = cc_addrs = []
+            gp['temp_channel_extensions'] = ce_addrs = []
+            gp['temp_channel_dependencies'] = cd_addrs = []
+
             gp_texts = deepcopy(gp['texts'])
-            if gp['data_location'] == v3c.LOCATION_ORIGINAL_FILE:
+            if gp['data_location'] == v23c.LOCATION_ORIGINAL_FILE:
                 stream = self._file
             else:
                 stream = self._tempfile
@@ -3248,93 +3433,125 @@ def _save_without_metadata(self, dst, overwrite, compression):
                         my_dict[key] = address
                         write(bytes(block))

-            # ChannelConversions
-            cc = gp['temp_channel_conversions'] = []
-            for i, conv in enumerate(gp['channel_conversions']):
-                if not conv:
-                    gp['temp_channel_conversions'].append(0)
-                    continue
-
-                address = tell()
-                gp['temp_channel_conversions'].append(address)
-                conv = ChannelConversion(
-                    address=conv,
-                    stream=stream,
-                )
-                if conv['conversion_type'] == v3c.CONVERSION_TYPE_VTABR:
-                    pairs = gp_texts['conversion_tab'][i].items()
-                    for key, item in pairs:
-                        conv[key] = item
-
-                write(bytes(conv))
-
-            # Channel Extension
-            cs = gp['temp_channel_extensions'] = []
-            for source in gp['channel_extensions']:
-                if source:
-                    address = tell()
-                    gp['temp_channel_extensions'].append(address)
-                    source = ChannelExtension(
-                        address=source,
-                        stream=stream,
-                    )
-                    write(bytes(source))
-                else:
-                    gp['temp_channel_extensions'].append(0)

             # Channel Dependency
-            cd = gp['temp_channel_dependencies'] = []
             for dep in gp['channel_dependencies']:
                 if dep:
                     address = tell()
-                    gp['temp_channel_dependencies'].append(address)
-                    dep.address = address
+                    cd_addrs.append(address)
                     write(bytes(dep))
                 else:
+
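This save path (like the metadata variant above) de-duplicates channel extension and conversion blocks: a block is written once and every later reference reuses the stored address. The idea, reduced to a helper with illustrative names:

```python
import io

ce_map = {}  # raw block bytes -> file address

def write_block_once(raw_bytes, tell, write):
    """Write raw_bytes once; return the address of the single stored copy."""
    if raw_bytes not in ce_map:
        ce_map[raw_bytes] = tell()
        write(raw_bytes)
    return ce_map[raw_bytes]

buf = io.BytesIO()
first = write_block_once(b'##CE-example-block', buf.tell, buf.write)
again = write_block_once(b'##CE-example-block', buf.tell, buf.write)
assert first == again  # the duplicate was not written a second time
```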
cd_addrs.append(0) + + # channel extensions + for addr in gp['channel_extensions']: + if addr: + stream.seek(addr) + raw_bytes = stream.read(v23c.CE_BLOCK_SIZE) + if raw_bytes in ce_map: + ce_addrs.append(ce_map[raw_bytes]) + else: + address = tell() + source = ChannelExtension(raw_bytes=raw_bytes) + ce_map[raw_bytes] = address + ce_addrs.append(address) + write(bytes(source)) + else: + ce_addrs.append(0) + + # ChannelConversions + for i, addr in enumerate(gp['channel_conversions']): + if not addr: + cc_addrs.append(0) + continue + + stream.seek(addr+2) + size = unpack('(?P(.|\n)+?)') + PYVERSION = sys.version_info[0] if PYVERSION == 2: + # pylint: disable=W0622 from .utils import bytes + # pylint: enable=W0622 __all__ = ['MDF4', ] @@ -157,6 +159,8 @@ def __init__(self, name=None, memory='full', version='4.10'): self._ch_map = {} self._master_channel_cache = {} + self._si_map = {} + self._cc_map = {} # used for appending when memory=False self._tempfile = TemporaryFile() @@ -231,10 +235,13 @@ def _read(self): # read file history fh_addr = self.header['file_history_addr'] while fh_addr: - fh = FileHistory(address=fh_addr, stream=stream) - fh_text = TextBlock(address=fh['comment_addr'], stream=stream) - self.file_history.append((fh, fh_text)) - fh_addr = fh['next_fh_addr'] + history_block = FileHistory(address=fh_addr, stream=stream) + history_text = TextBlock( + address=history_block['comment_addr'], + stream=stream, + ) + self.file_history.append((history_block, history_text)) + fh_addr = history_block['next_fh_addr'] # read attachments at_addr = self.header['first_attachment_addr'] @@ -279,9 +286,6 @@ def _read(self): # channel_group is lsit to allow uniform handling of all texts # in save method grp['texts'] = { - 'channels': [], - 'sources': [], - 'conversions': [], 'conversion_tab': [], 'channel_group': [], } @@ -296,11 +300,14 @@ def _read(self): samples_size = channel_group['samples_byte_nr'] inval_size = channel_group['invalidation_bytes_nr'] record_id = channel_group['record_id'] - + if PYVERSION == 2: + record_id = chr(record_id) cg_size[record_id] = samples_size + inval_size else: # VLDS flags record_id = channel_group['record_id'] + if PYVERSION == 2: + record_id = chr(record_id) cg_size[record_id] = 0 if record_id_nr: @@ -353,7 +360,22 @@ def _read(self): if memory == 'full': grp['data_location'] = v4c.LOCATION_MEMORY dat_addr = group['data_block_addr'] - data = self._read_data_block(address=dat_addr, stream=stream) + + if record_id_nr == 0: + size = channel_group['samples_byte_nr'] + size *= channel_group['cycles_nr'] + else: + size = sum( + (gp['channel_group']['samples_byte_nr'] + record_id_nr) + * gp['channel_group']['cycles_nr'] + for gp in new_groups + ) + + data = self._read_data_block( + address=dat_addr, + stream=stream, + size=size, + ) if record_id_nr == 0: grp = new_groups[0] @@ -370,10 +392,10 @@ def _read(self): i += 1 rec_size = cg_size[rec_id] if rec_size: - rec_data = data[i: i+rec_size] + rec_data = data[i: i + rec_size] cg_data[rec_id].append(rec_data) else: - rec_size = unpack('= 0: + data = bytearray(size) + view = memoryview(data) + position = 0 + while address: + dl = DataList(address=address, stream=stream) + for i in range(dl['links_nr'] - 1): + addr = dl['data_block_addr{}'.format(i)] + stream.seek(addr, v4c.SEEK_START) + id_string = stream.read(4) + if id_string == b'##DT': + _, dim, __ = unpack('<4s2Q', stream.read(20)) + dim -= 24 + position += stream.readinto(view[position: position+dim]) + elif id_string == b'##DZ': + block = DataZippedBlock( + 
stream=stream, address=addr, + ) + uncompressed_size = block['original_size'] + view[position: position+uncompressed_size] = block['data'] + position += uncompressed_size + address = dl['next_dl_addr'] + + else: + + data = [] + while address: + dl = DataList(address=address, stream=stream) + for i in range(dl['links_nr'] - 1): + addr = dl['data_block_addr{}'.format(i)] + stream.seek(addr, v4c.SEEK_START) + id_string = stream.read(4) + if id_string == b'##DT': + block = DataBlock(stream=stream, address=addr) + data.append(block['data']) + elif id_string == b'##DZ': + block = DataZippedBlock( stream=stream, + address=addr, ) - ) - address = dl['next_dl_addr'] - data = b''.join(data) + data.append(block['data']) + address = dl['next_dl_addr'] + data = b''.join(data) # or a header list elif id_string == b'##HL': hl = HeaderList(address=address, stream=stream) @@ -761,6 +810,8 @@ def _load_group_data(self, group): cg_data = [] cg_size = group['record_size'] record_id = channel_group['record_id'] + if PYVERSION == 2: + record_id = chr(record_id) if data_group['record_id_len'] <= 2: record_id_nr = data_group['record_id_len'] else: @@ -774,10 +825,10 @@ def _load_group_data(self, group): rec_size = cg_size[rec_id] if rec_size: if rec_id == record_id: - rec_data = data[i: i+rec_size] + rec_data = data[i: i + rec_size] cg_data.append(rec_data) else: - rec_size = unpack('> 3 shape = tuple( @@ -1073,6 +1127,8 @@ def _get_not_byte_aligned_data(self, data, group, ch_nr): vals = fromstring(data, dtype=dtype(types)) + vals.setflags(write=False) + vals = vals['vals'] if channel['data_type'] not in big_endian_types: @@ -1122,7 +1178,7 @@ def _get_not_byte_aligned_data(self, data, group, ch_nr): vals = fromarrays([vals, extra], dtype=dtype(types)) vals = vals.tostring() - fmt = get_fmt(channel['data_type'], size) + fmt = get_fmt_v4(channel['data_type'], size) if size <= byte_count: types = [ ('vals', fmt), @@ -1172,8 +1228,10 @@ def _validate_channel_selection(self, name=None, group=None, index=None): """ if name is None: if group is None or index is None: - message = ('Invalid arguments for channel selection: ' - 'must give "name" or, "group" and "index"') + message = ( + 'Invalid arguments for channel selection: ' + 'must give "name" or, "group" and "index"' + ) raise MdfException(message) else: gp_nr, ch_nr = group, index @@ -1189,53 +1247,41 @@ def _validate_channel_selection(self, name=None, group=None, index=None): if group is None: gp_nr, ch_nr = self.channels_db[name][0] if len(self.channels_db[name]) > 1: - message = ('Multiple occurances for channel "{}". ' - 'Using first occurance from data group {}. ' - 'Provide both "group" and "index" arguments' - ' to select another data group') + message = ( + 'Multiple occurances for channel "{}". ' + 'Using first occurance from data group {}. ' + 'Provide both "group" and "index" arguments' + ' to select another data group' + ) message = message.format(name, gp_nr) warnings.warn(message) else: - group_valid = False for gp_nr, ch_nr in self.channels_db[name]: if gp_nr == group: - group_valid = True if index is None: break elif index == ch_nr: break else: - if group_valid: - gp_nr, ch_nr = self.channels_db[name][group] - message = ('You have selected channel index "{}"' - 'of group "{}" for channel "{}", but ' - 'this channel index is invalid. 
Using ' - 'first occurance of "{}" in this group' - ' at index "{}"') - message = message.format( - index, - group, - name, - name, - ch_nr, - ) + if index is None: + message = 'Channel "{}" not found in group {}' + message = message.format(name, group) else: - gp_nr, ch_nr = self.channels_db[name][0] - message = ('You have selected group "{}" for ' - 'channel "{}", but this channel was not' - ' found in this group, or this group ' - 'index does not exist. Using first ' - 'occurance of "{}" from group "{}"') - message = message.format(group, name, name, gp_nr) - warnings.warn(message) + message = ( + 'Channel "{}" not found in group {} ' + 'at index {}' + ) + message = message.format(name, group, index) + raise MdfException(message) + return gp_nr, ch_nr def append(self, signals, source_info='Python', common_timebase=False): """ Appends a new data group. - For channel dependencies type Signals, the *samples* attribute must be a - numpy.recarray + For channel dependencies type Signals, the *samples* attribute must be + a numpy.recarray Parameters ---------- @@ -1302,12 +1348,13 @@ def append(self, signals, source_info='Python', common_timebase=False): # be saved as new signals. simple_signals = [ sig for sig in signals - if len(sig.samples.shape) <= 1 and sig.samples.dtype.names is None + if len(sig.samples.shape) <= 1 + and sig.samples.dtype.names is None ] composed_signals = [ sig for sig in signals - if len(sig.samples.shape) > 1 or - sig.samples.dtype.names + if len(sig.samples.shape) > 1 + or sig.samples.dtype.names ] dg_cntr = len(self.groups) @@ -1318,9 +1365,6 @@ def append(self, signals, source_info='Python', common_timebase=False): gp['channel_sources'] = gp_source = [] gp['channel_dependencies'] = gp_dep = [] gp['texts'] = gp_texts = { - 'channels': [], - 'sources': [], - 'conversions': [], 'conversion_tab': [], 'channel_group': [], } @@ -1338,61 +1382,57 @@ def append(self, signals, source_info='Python', common_timebase=False): # setup all blocks related to the time master channel # time channel texts - for key in ('channels', 'sources', 'channel_group', 'conversions'): - gp['texts'][key].append({}) - for key in ('conversion_tab', ): - gp['texts'][key].append(None) + for key in ('conversion_tab',): + gp_texts[key].append(None) memory = self.memory file = self._tempfile write = file.write tell = file.tell - block = TextBlock(text='t', meta=False) - if memory != 'minimum': - gp_texts['channels'][-1]['name_addr'] = block - else: - address = tell() - gp_texts['channels'][-1]['name_addr'] = address + if memory == 'minimum': + block = TextBlock(text='t', meta=False) + channel_name_addr = tell() write(bytes(block)) - block = TextBlock(text='s', meta=False) - if memory != 'minimum': - gp_texts['conversions'][-1]['unit_addr'] = block - else: - address = tell() - gp_texts['conversions'][-1]['unit_addr'] = address + if memory == 'minimum': + block = TextBlock(text='s', meta=False) + channel_unit_addr = tell() write(bytes(block)) - si_text = TextBlock(text=source_info, meta=False) if memory == 'minimum': - address = tell() - si_text.address = address - write(bytes(si_text)) - gp_texts['sources'][-1]['name_addr'] = address - gp_texts['sources'][-1]['path_addr'] = address - gp_texts['channel_group'][-1]['acq_name_addr'] = address - gp_texts['channel_group'][-1]['comment_addr'] = address + block = TextBlock(text=source_info, meta=False) + source_text_address = tell() + write(bytes(block)) else: - gp_texts['sources'][-1]['name_addr'] = si_text - gp_texts['sources'][-1]['path_addr'] = si_text - 
gp_texts['channel_group'][-1]['acq_name_addr'] = si_text - gp_texts['channel_group'][-1]['comment_addr'] = si_text + source_text_address = 0 + + source_block = SourceInformation( + name_addr=source_text_address, + path_addr=source_text_address, + ) + source_block.name = source_block.path = source_info + + source_info_address = tell() + write(bytes(source_block)) # conversion and source for time channel if memory == 'minimum': gp_conv.append(0) - - address = tell() - block = SourceInformation() - write(bytes(block)) - gp_source.append(address) + gp_source.append(source_info_address) else: gp_conv.append(None) - gp_source.append(SourceInformation()) + gp_source.append(source_block) + + if memory == 'minimum': + name_addr = channel_name_addr + unit_addr = channel_unit_addr + else: + name_addr = 0 + unit_addr = 0 # time channel - t_type, t_size = fmt_to_datatype(t.dtype) + t_type, t_size = fmt_to_datatype_v4(t.dtype) kargs = { 'channel_type': v4c.CHANNEL_TYPE_MASTER, 'data_type': t_type, @@ -1405,14 +1445,18 @@ def append(self, signals, source_info='Python', common_timebase=False): 'lower_limit': t[0] if cycles_nr else 0, 'upper_limit': t[-1] if cycles_nr else 0, 'flags': v4c.FLAG_PHY_RANGE_OK | v4c.FLAG_VAL_RANGE_OK, + 'name_addr': name_addr, + 'unit_addr': unit_addr, } ch = Channel(**kargs) - ch.name = name = 't' + name = 't' if memory == 'minimum': address = tell() write(bytes(ch)) gp_channels.append(address) else: + ch.name = name + ch.unit = 's' gp_channels.append(ch) if name not in self.channels_db: @@ -1452,6 +1496,7 @@ def append(self, signals, source_info='Python', common_timebase=False): int(min_val).bit_length(), int(max_val).bit_length(), ) + bit_length += 1 signal['bit_count'] = max(minimum_bitlength, bit_length) @@ -1504,30 +1549,26 @@ def append(self, signals, source_info='Python', common_timebase=False): max_val = signal_d['max'] name = signal.name - for key in ('channels', 'sources'): - gp['texts'][key].append({}) - for key in ('conversion_tab', 'conversions'): - gp['texts'][key].append(None) + gp_texts['conversion_tab'].append(None) - block = TextBlock(text=name, meta=False) if memory == 'minimum': - address = tell() - gp_texts['channels'][-1]['name_addr'] = address + block = TextBlock(text=name, meta=False) + channel_name_address = tell() write(bytes(block)) - gp_texts['sources'][-1]['name_addr'] = si_text.address - gp_texts['sources'][-1]['path_addr'] = si_text.address - else: - gp_texts['channels'][-1]['name_addr'] = block - gp_texts['sources'][-1]['name_addr'] = si_text - gp_texts['sources'][-1]['path_addr'] = si_text - if signal.unit: - block = TextBlock(text=signal.unit, meta=False) - if memory == 'minimum': - address = tell() - gp_texts['channels'][-1]['unit_addr'] = address + + if signal.unit: + block = TextBlock(text=signal.unit, meta=False) + channel_unit_address = tell() + write(bytes(block)) + else: + channel_unit_address = 0 + + if signal.comment: + block = TextBlock(text=signal.comment, meta=False) + channel_comment_address = tell() write(bytes(block)) else: - gp_texts['channels'][-1]['unit_addr'] = block + channel_comment_address = 0 # conversions for channel info = signal.info @@ -1570,7 +1611,7 @@ def append(self, signals, source_info='Python', common_timebase=False): address = tell() conv_texts_tab['text_{}'.format(i)] = address write(bytes(block)) - if info.get('default', b''): + if 'default' in info: block = TextBlock( text=info['default'], meta=False, @@ -1581,7 +1622,6 @@ def append(self, signals, source_info='Python', common_timebase=False): address = 
tell() conv_texts_tab['default_addr'] = address write(bytes(block)) - kargs['default_addr'] = 0 kargs['links_nr'] = len(raw) + 5 block = ChannelConversion(**kargs) if memory != 'minimum': @@ -1597,7 +1637,6 @@ def append(self, signals, source_info='Python', common_timebase=False): upper = info['upper'] texts = info['phys'] kargs['ref_param_nr'] = len(upper) - kargs['default_addr'] = info.get('default', 0) kargs['links_nr'] = len(lower) + 5 for i, (u_, l_, t_) in enumerate(zip(upper, lower, texts)): @@ -1615,7 +1654,7 @@ def append(self, signals, source_info='Python', common_timebase=False): address = tell() conv_texts_tab['text_{}'.format(i)] = address write(bytes(block)) - if info.get('default', b''): + if 'default' in info: block = TextBlock( text=info['default'], meta=False, @@ -1626,8 +1665,9 @@ def append(self, signals, source_info='Python', common_timebase=False): address = tell() conv_texts_tab['default_addr'] = address write(bytes(block)) - kargs['default_addr'] = 0 + block = ChannelConversion(**kargs) + if memory != 'minimum': gp_conv.append(block) else: @@ -1642,16 +1682,22 @@ def append(self, signals, source_info='Python', common_timebase=False): gp_conv.append(0) if conv_texts_tab: - gp['texts']['conversion_tab'][-1] = conv_texts_tab + gp_texts['conversion_tab'][-1] = conv_texts_tab # source for channel if memory != 'minimum': - gp_source.append(SourceInformation()) + gp_source.append(source_block) else: - address = tell() - block = SourceInformation() - write(bytes(block)) - gp_source.append(address) + gp_source.append(source_info_address) + + if memory == 'minimum': + name_addr = channel_name_address + unit_addr = channel_unit_address + comment_addr = channel_comment_address + else: + name_addr = 0 + unit_addr = 0 + comment_addr = 0 # compute additional byte offset for large records size if signal.samples.dtype.kind == 'u': @@ -1668,14 +1714,19 @@ def append(self, signals, source_info='Python', common_timebase=False): 'max_raw_value': max_val if min_val <= max_val else 0, 'lower_limit': min_val if min_val <= max_val else 0, 'upper_limit': max_val if min_val <= max_val else 0, + 'name_addr': name_addr, + 'unit_addr': unit_addr, + 'comment_addr': comment_addr, } if min_val > max_val: kargs['flags'] = 0 else: kargs['flags'] = v4c.FLAG_PHY_RANGE_OK | v4c.FLAG_VAL_RANGE_OK ch = Channel(**kargs) - ch.name = name if memory != 'minimum': + ch.name = name + ch.unit = signal.unit + ch.comment = signal.comment gp_channels.append(ch) else: address = tell() @@ -1704,36 +1755,30 @@ def append(self, signals, source_info='Python', common_timebase=False): # first add the signals in the simple signal list for signal in simple_signals: name = signal.name - for key in ('channels', 'sources'): - gp['texts'][key].append({}) - for key in ('conversion_tab', 'conversions'): - gp['texts'][key].append(None) + gp_texts['conversion_tab'].append(None) - block = TextBlock(text=name, meta=False) - if memory != 'minimum': - gp_texts['channels'][-1]['name_addr'] = block - else: - address = tell() + if memory == 'minimum': + block = TextBlock(text=name, meta=False) + channel_name_address = tell() write(bytes(block)) - gp_texts['channels'][-1]['name_addr'] = address - if signal.unit: - block = TextBlock( - text=signal.unit, - meta=False, - ) - if memory != 'minimum': - gp_texts['channels'][-1]['unit_addr'] = block - else: - address = tell() + + if signal.unit: + block = TextBlock( + text=signal.unit, + meta=False, + ) + + channel_unit_address = tell() write(bytes(block)) - gp_texts['channels'][-1]['unit_addr'] 
= address + else: + channel_unit_address = 0 - if memory != 'minimum': - gp_texts['sources'][-1]['name_addr'] = si_text - gp_texts['sources'][-1]['path_addr'] = si_text - else: - gp_texts['sources'][-1]['name_addr'] = si_text.address - gp_texts['sources'][-1]['path_addr'] = si_text.address + if signal.comment: + block = TextBlock(text=signal.comment, meta=False) + channel_comment_address = tell() + write(bytes(block)) + else: + channel_comment_address = 0 # conversions for channel info = signal.info @@ -1776,7 +1821,7 @@ def append(self, signals, source_info='Python', common_timebase=False): address = tell() conv_texts_tab['text_{}'.format(i)] = address write(bytes(block)) - if info.get('default', b''): + if 'default' in info: block = TextBlock( text=info['default'], meta=False, @@ -1787,8 +1832,8 @@ def append(self, signals, source_info='Python', common_timebase=False): address = tell() conv_texts_tab['default_addr'] = address write(bytes(block)) - kargs['default_addr'] = 0 kargs['links_nr'] = len(raw) + 5 + block = ChannelConversion(**kargs) if memory != 'minimum': gp_conv.append(block) @@ -1796,6 +1841,7 @@ def append(self, signals, source_info='Python', common_timebase=False): address = tell() gp_conv.append(address) write(bytes(block)) + elif info and 'lower' in info: kargs = {} kargs['conversion_type'] = v4c.CONVERSION_TYPE_RTABX @@ -1803,7 +1849,6 @@ def append(self, signals, source_info='Python', common_timebase=False): upper = info['upper'] texts = info['phys'] kargs['ref_param_nr'] = len(upper) - kargs['default_addr'] = info.get('default', 0) kargs['links_nr'] = len(lower) + 5 for i, (u_, l_, t_) in enumerate(zip(upper, lower, texts)): @@ -1821,7 +1866,7 @@ def append(self, signals, source_info='Python', common_timebase=False): address = tell() conv_texts_tab['text_{}'.format(i)] = address write(bytes(block)) - if info.get('default', b''): + if 'default' in info: block = TextBlock( text=info['default'], meta=False, @@ -1832,7 +1877,6 @@ def append(self, signals, source_info='Python', common_timebase=False): address = tell() conv_texts_tab['default_addr'] = address write(bytes(block)) - kargs['default_addr'] = 0 block = ChannelConversion(**kargs) if memory != 'minimum': gp_conv.append(block) @@ -1848,19 +1892,25 @@ def append(self, signals, source_info='Python', common_timebase=False): gp_conv.append(0) if conv_texts_tab: - gp['texts']['conversion_tab'][-1] = conv_texts_tab + gp_texts['conversion_tab'][-1] = conv_texts_tab # source for channel if memory != 'minimum': - gp_source.append(SourceInformation()) + gp_source.append(source_block) else: - block = SourceInformation() - address = tell() - write(bytes(block)) - gp_source.append(address) + gp_source.append(source_info_address) + + if memory == 'minimum': + name_addr = channel_name_address + unit_addr = channel_unit_address + comment_addr = channel_comment_address + else: + name_addr = 0 + unit_addr = 0 + comment_addr = 0 # compute additional byte offset for large records size - s_type, s_size = fmt_to_datatype(signal.samples.dtype) + s_type, s_size = fmt_to_datatype_v4(signal.samples.dtype) byte_size = max(s_size // 8, 1) min_val, max_val = get_min_max(signal.samples) kargs = { @@ -1873,14 +1923,19 @@ def append(self, signals, source_info='Python', common_timebase=False): 'max_raw_value': max_val if min_val <= max_val else 0, 'lower_limit': min_val if min_val <= max_val else 0, 'upper_limit': max_val if min_val <= max_val else 0, + 'name_addr': name_addr, + 'unit_addr': unit_addr, + 'comment_addr': comment_addr, } if min_val > 
max_val: kargs['flags'] = 0 else: kargs['flags'] = v4c.FLAG_PHY_RANGE_OK | v4c.FLAG_VAL_RANGE_OK ch = Channel(**kargs) - ch.name = name if memory != 'minimum': + ch.name = name + ch.unit = signal.unit + ch.comment = signal.comment gp_channels.append(ch) else: address = tell() @@ -1953,36 +2008,26 @@ def append(self, signals, source_info='Python', common_timebase=False): s_size = byte_size << 3 # add channel texts - for key in ('channels', 'sources'): - gp['texts'][key].append({}) - for key in ('conversion_tab', 'conversions'): - gp['texts'][key].append(None) + gp_texts['conversion_tab'].append(None) - block = TextBlock(text=name, meta=False) - if memory != 'minimum': - gp_texts['channels'][-1]['name_addr'] = block - else: - address = tell() + if memory == 'minimum': + block = TextBlock(text=name, meta=False) + channel_name_address = tell() write(bytes(block)) - gp_texts['channels'][-1]['name_addr'] = address - if signal.unit: - block = TextBlock( - text=signal.unit, - meta=False, - ) - if memory != 'minimum': - gp_texts['channels'][-1]['unit_addr'] = block - else: - address = tell() + if signal.unit: + block = TextBlock( + text=signal.unit, + meta=False, + ) + channel_unit_address = tell() write(bytes(block)) - gp_texts['channels'][-1]['unit_addr'] = address - if memory != 'minimum': - gp_texts['sources'][-1]['name_addr'] = si_text - gp_texts['sources'][-1]['path_addr'] = si_text - else: - gp_texts['sources'][-1]['name_addr'] = si_text.address - gp_texts['sources'][-1]['path_addr'] = si_text.address + if signal.comment: + block = TextBlock(text=signal.comment, meta=False) + channel_comment_address = tell() + write(bytes(block)) + else: + channel_comment_address = 0 # add channel conversion if memory != 'minimum': @@ -1990,18 +2035,23 @@ def append(self, signals, source_info='Python', common_timebase=False): else: gp_conv.append(0) - # source for time if memory != 'minimum': - gp_source.append(SourceInformation()) + gp_source.append(source_block) else: - address = tell() - block = SourceInformation() - write(bytes(block)) - gp_source.append(address) + gp_source.append(source_info_address) - # there is no chanel dependency + # there is no channel dependency gp_dep.append(None) + if memory == 'minimum': + name_addr = channel_name_address + unit_addr = channel_unit_address + comment_addr = channel_comment_address + else: + name_addr = 0 + unit_addr = 0 + comment_addr = 0 + # add channel block kargs = { 'channel_type': v4c.CHANNEL_TYPE_VALUE, @@ -2014,10 +2064,15 @@ def append(self, signals, source_info='Python', common_timebase=False): 'lower_limit': 0, 'upper_limit': 0, 'flags': 0, + 'name_addr': name_addr, + 'unit_addr': unit_addr, + 'comment_addr': comment_addr, } ch = Channel(**kargs) - ch.name = name if memory != 'minimum': + ch.name = name + ch.unit = signal.unit + ch.comment = signal.comment gp_channels.append(ch) else: address = tell() @@ -2037,7 +2092,7 @@ def append(self, signals, source_info='Python', common_timebase=False): ch_cntr += 1 - elif names and names[0] != signal.name: + elif names and names[0] != name: # here we have a structure channel composition field_name = get_unique_name(field_names, name) @@ -2045,36 +2100,26 @@ def append(self, signals, source_info='Python', common_timebase=False): # first we add the structure channel # add channel texts - for key in ('channels', 'sources'): - gp['texts'][key].append({}) - for key in ('conversion_tab', 'conversions'): - gp['texts'][key].append(None) + gp_texts['conversion_tab'].append(None) - block = TextBlock(text=name, 
meta=False) - if memory != 'minimum': - gp_texts['channels'][-1]['name_addr'] = block - else: - address = tell() + if memory == 'minimum': + block = TextBlock(text=name, meta=False) + channel_name_address = tell() write(bytes(block)) - gp_texts['channels'][-1]['name_addr'] = address - if signal.unit: - block = TextBlock( - text=signal.unit, - meta=False, - ) - if memory != 'minimum': - gp_texts['channels'][-1]['unit_addr'] = block - else: - address = tell() + + if signal.unit: + block = TextBlock(text=signal.unit, meta=False) + channel_unit_address = tell() write(bytes(block)) - gp_texts['channels'][-1]['unit_addr'] = address + else: + channel_unit_address = 0 - if memory != 'minimum': - gp_texts['sources'][-1]['name_addr'] = si_text - gp_texts['sources'][-1]['path_addr'] = si_text - else: - gp_texts['sources'][-1]['name_addr'] = si_text.address - gp_texts['sources'][-1]['path_addr'] = si_text.address + if signal.comment: + block = TextBlock(text=signal.comment, meta=False) + channel_comment_address = tell() + write(bytes(block)) + else: + channel_comment_address = 0 # add channel conversion if memory != 'minimum': @@ -2082,14 +2127,19 @@ def append(self, signals, source_info='Python', common_timebase=False): else: gp_conv.append(0) - # source for time if memory != 'minimum': - gp_source.append(SourceInformation()) + gp_source.append(source_block) else: - address = tell() - block = SourceInformation() - write(bytes(block)) - gp_source.append(address) + gp_source.append(source_info_address) + + if memory == 'minimum': + name_addr = channel_name_address + unit_addr = channel_unit_address + comment_addr = channel_comment_address + else: + name_addr = 0 + unit_addr = 0 + comment_addr = 0 # add channel block kargs = { @@ -2103,10 +2153,15 @@ def append(self, signals, source_info='Python', common_timebase=False): 'lower_limit': 0, 'upper_limit': 0, 'flags': 0, + 'name_addr': name_addr, + 'unit_addr': unit_addr, + 'comment_addr': comment_addr, } ch = Channel(**kargs) - ch.name = name if memory != 'minimum': + ch.name = name + ch.unit = signal.unit + ch.comment = signal.comment gp_channels.append(ch) else: address = tell() @@ -2133,32 +2188,33 @@ def append(self, signals, source_info='Python', common_timebase=False): samples = signal.samples[name] - s_type, s_size = fmt_to_datatype(samples.dtype) + s_type, s_size = fmt_to_datatype_v4(samples.dtype) byte_size = s_size >> 3 fields.append(samples) types.append((field_name, samples.dtype)) # add channel texts - for key in ('channels', 'sources'): - gp['texts'][key].append({}) - for key in ('conversion_tab', 'conversions'): - gp['texts'][key].append(None) + gp_texts['conversion_tab'].append(None) - block = TextBlock(text=name, meta=False) - if memory != 'minimum': - gp_texts['channels'][-1]['name_addr'] = block - else: - address = tell() + if memory == 'minimum': + block = TextBlock(text=name, meta=False) + channel_name_address = tell() write(bytes(block)) - gp_texts['channels'][-1]['name_addr'] = address - if memory != 'minimum': - gp_texts['sources'][-1]['name_addr'] = si_text - gp_texts['sources'][-1]['path_addr'] = si_text - else: - gp_texts['sources'][-1]['name_addr'] = si_text.address - gp_texts['sources'][-1]['path_addr'] = si_text.address + if signal.unit: + block = TextBlock(text=signal.unit, meta=False) + channel_unit_address = tell() + write(bytes(block)) + else: + channel_unit_address = 0 + + if signal.comment: + block = TextBlock(text=signal.comment, meta=False) + channel_comment_address = tell() + write(bytes(block)) + else: + 
channel_comment_address = 0 # add channel conversion if memory != 'minimum': @@ -2166,14 +2222,20 @@ def append(self, signals, source_info='Python', common_timebase=False): else: gp_conv.append(0) - # source for time + # source if memory != 'minimum': - gp_source.append(SourceInformation()) + gp_source.append(source_block) else: - address = tell() - block = SourceInformation() - write(bytes(block)) - gp_source.append(address) + gp_source.append(source_info_address) + + if memory == 'minimum': + name_addr = channel_name_address + unit_addr = channel_unit_address + comment_addr = channel_comment_address + else: + name_addr = 0 + unit_addr = 0 + comment_addr = 0 # add channel block min_val, max_val = get_min_max(signal.samples) @@ -2188,11 +2250,15 @@ def append(self, signals, source_info='Python', common_timebase=False): 'lower_limit': min_val if min_val <= max_val else 0, 'upper_limit': max_val if min_val <= max_val else 0, 'flags': v4c.FLAG_PHY_RANGE_OK | v4c.FLAG_VAL_RANGE_OK, + 'name_addr': name_addr, + 'unit_addr': unit_addr, + 'comment_addr': comment_addr, } ch = Channel(**kargs) - - ch.name = name if memory != 'minimum': + ch.name = name + ch.unit = signal.unit + ch.comment = signal.comment gp_channels.append(ch) dep_list.append(ch) else: @@ -2256,7 +2322,6 @@ def append(self, signals, source_info='Python', common_timebase=False): for i in range(dims_nr): kargs['dim_size_{}'.format(i)] = shape[i] - parent_dep = ChannelArrayBlock(**kargs) gp_dep.append([parent_dep, ]) @@ -2281,36 +2346,26 @@ def append(self, signals, source_info='Python', common_timebase=False): # first we add the structure channel # add channel texts - for key in ('channels', 'sources'): - gp['texts'][key].append({}) - for key in ('conversion_tab', 'conversions'): - gp['texts'][key].append(None) + gp_texts['conversion_tab'].append(None) - block = TextBlock(text=name, meta=False) - if memory != 'minimum': - gp_texts['channels'][-1]['name_addr'] = block - else: - address = tell() + if memory == 'minimum': + block = TextBlock(text=name, meta=False) + channel_name_address = tell() write(bytes(block)) - gp_texts['channels'][-1]['name_addr'] = address - if signal.unit: - block = TextBlock( - text=signal.unit, - meta=False, - ) - if memory != 'minimum': - gp_texts['channels'][-1]['unit_addr'] = block - else: - address = tell() + + if signal.unit: + block = TextBlock(text=signal.unit, meta=False) + channel_unit_address = tell() write(bytes(block)) - gp_texts['channels'][-1]['unit_addr'] = address + else: + channel_unit_address = 0 - if memory != 'minimum': - gp_texts['sources'][-1]['name_addr'] = si_text - gp_texts['sources'][-1]['path_addr'] = si_text - else: - gp_texts['sources'][-1]['name_addr'] = si_text.address - gp_texts['sources'][-1]['path_addr'] = si_text.address + if signal.comment: + block = TextBlock(text=signal.comment, meta=False) + channel_comment_address = tell() + write(bytes(block)) + else: + channel_comment_address = 0 # add channel conversion if memory != 'minimum': @@ -2318,16 +2373,22 @@ def append(self, signals, source_info='Python', common_timebase=False): else: gp_conv.append(0) - # source for time + # source for channel if memory != 'minimum': - gp_source.append(SourceInformation()) + gp_source.append(source_block) else: - address = tell() - block = SourceInformation() - write(bytes(block)) - gp_source.append(address) + gp_source.append(source_info_address) - s_type, s_size = fmt_to_datatype(samples.dtype) + if memory == 'minimum': + name_addr = channel_name_address + unit_addr = channel_unit_address 
+ comment_addr = channel_comment_address + else: + name_addr = 0 + unit_addr = 0 + comment_addr = 0 + + s_type, s_size = fmt_to_datatype_v4(samples.dtype) # add channel block kargs = { @@ -2341,10 +2402,15 @@ def append(self, signals, source_info='Python', common_timebase=False): 'lower_limit': 0, 'upper_limit': 0, 'flags': 0, + 'name_addr': name_addr, + 'unit_addr': unit_addr, + 'comment_addr': comment_addr, } ch = Channel(**kargs) - ch.name = name if memory != 'minimum': + ch.name = name + ch.unit = signal.unit + ch.comment = signal.comment gp_channels.append(ch) else: address = tell() @@ -2374,26 +2440,26 @@ def append(self, signals, source_info='Python', common_timebase=False): fields.append(samples) types.append((field_name, samples.dtype, shape)) - # add composed parent signal texts - for key in ('channels', 'sources'): - gp['texts'][key].append({}) - for key in ('conversion_tab', 'conversions'): - gp['texts'][key].append(None) + gp_texts['conversion_tab'].append(None) - block = TextBlock(text=name, meta=False) - if memory != 'minimum': - gp_texts['channels'][-1]['name_addr'] = block - else: - address = tell() + if memory == 'minimum': + block = TextBlock(text=name, meta=False) + channel_name_address = tell() write(bytes(block)) - gp_texts['channels'][-1]['name_addr'] = address - if memory != 'minimum': - gp_texts['sources'][-1]['name_addr'] = si_text - gp_texts['sources'][-1]['path_addr'] = si_text - else: - gp_texts['sources'][-1]['name_addr'] = si_text.address - gp_texts['sources'][-1]['path_addr'] = si_text.address + if signal.unit: + block = TextBlock(text=signal.unit, meta=False) + channel_unit_address = tell() + write(bytes(block)) + else: + channel_unit_address = 0 + + if signal.comment: + block = TextBlock(text=signal.comment, meta=False) + channel_comment_address = tell() + write(bytes(block)) + else: + channel_comment_address = 0 # add channel conversion if memory != 'minimum': @@ -2401,14 +2467,20 @@ def append(self, signals, source_info='Python', common_timebase=False): else: gp_conv.append(0) - # source for time + # source for channel if memory != 'minimum': - gp_source.append(SourceInformation()) + gp_source.append(source_block) else: - address = tell() - block = SourceInformation() - write(bytes(block)) - gp_source.append(address) + gp_source.append(source_info_address) + + if memory == 'minimum': + name_addr = channel_name_address + unit_addr = channel_unit_address + comment_addr = channel_comment_address + else: + name_addr = 0 + unit_addr = 0 + comment_addr = 0 # add channel dependency block kargs = { @@ -2423,7 +2495,7 @@ def append(self, signals, source_info='Python', common_timebase=False): # add components channel min_val, max_val = get_min_max(samples) - s_type, s_size = fmt_to_datatype(samples.dtype) + s_type, s_size = fmt_to_datatype_v4(samples.dtype) byte_size = max(s_size // 8, 1) kargs = { 'channel_type': v4c.CHANNEL_TYPE_VALUE, @@ -2436,16 +2508,21 @@ def append(self, signals, source_info='Python', common_timebase=False): 'lower_limit': min_val if min_val <= max_val else 0, 'upper_limit': max_val if min_val <= max_val else 0, 'flags': v4c.FLAG_PHY_RANGE_OK | v4c.FLAG_VAL_RANGE_OK, + 'name_addr': name_addr, + 'unit_addr': unit_addr, + 'comment_addr': comment_addr, } - channel = Channel(**kargs) - channel.name = name + ch = Channel(**kargs) if memory != 'minimum': - gp_channels.append(channel) + ch.name = name + ch.unit = signal.unit + ch.comment = signal.comment + gp_channels.append(ch) else: address = tell() - write(bytes(channel)) + write(bytes(ch)) 
gp_channels.append(address) parent_dep.referenced_channels.append((ch_cntr, dg_cntr)) @@ -2469,6 +2546,7 @@ def append(self, signals, source_info='Python', common_timebase=False): } gp['channel_group'] = ChannelGroup(**kargs) gp['size'] = cycles_nr * offset + gp_texts['channel_group'].append(None) # data group gp['data_group'] = DataGroup() @@ -2594,9 +2672,11 @@ def extract_attachment(self, index): if flags & v4c.FLAG_AT_EMBEDDED: data = attachment.extract() - file_path = texts['file_name_addr']['text']\ - .decode('utf-8')\ + file_path = ( + texts['file_name_addr']['text'] + .decode('utf-8') .strip(' \n\t\0') + ) out_path = os.path.dirname(file_path) if out_path: if not os.path.exists(out_path): @@ -2609,24 +2689,28 @@ def extract_attachment(self, index): else: # for external attachments read the file and return the content if flags & v4c.FLAG_AT_MD5_VALID: - file_path = texts['file_name_addr']['text']\ - .decode('utf-8')\ + file_path = ( + texts['file_name_addr']['text'] + .decode('utf-8') .strip(' \n\t\0') + ) data = open(file_path, 'rb').read() md5_worker = md5() md5_worker.update(data) md5_sum = md5_worker.digest() if attachment['md5_sum'] == md5_sum: - if texts['mime_addr']['text']\ - .decode('utf-8')\ - .startswith('text'): + if (texts['mime_addr']['text'] + .decode('utf-8') + .startswith('text')): with open(file_path, 'r') as f: data = f.read() return data else: - message = ('ATBLOCK md5sum="{}" ' - 'and external attachment data ({}) ' - 'md5sum="{}"') + message = ( + 'ATBLOCK md5sum="{}" ' + 'and external attachment data ({}) ' + 'md5sum="{}"' + ) message = message.format( attachment['md5_sum'], file_path, @@ -2634,9 +2718,9 @@ def extract_attachment(self, index): ) warnings.warn(message) else: - if texts['mime_addr']['text']\ - .decode('utf-8')\ - .startswith('text'): + if (texts['mime_addr']['text'] + .decode('utf-8') + .startswith('text')): mode = 'r' else: mode = 'rb' @@ -2647,6 +2731,7 @@ def extract_attachment(self, index): os.chdir(current_path) message = 'Exception during attachment extraction: ' + repr(err) warnings.warn(message) + return b'' def get_channel_unit(self, name=None, group=None, index=None): """Gets channel unit. 
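The extract_attachment() hunks above only trust an external attachment when the MD5 digest of the file on disk matches the digest stored in the ATBLOCK, and fall back to a warning otherwise. A minimal standalone sketch of that check (the helper name and its arguments are hypothetical stand-ins, not part of the asammdf API):

```python
from hashlib import md5

def verify_attachment(file_path, expected_md5):
    """Return the file content if its MD5 digest matches, else None."""
    with open(file_path, 'rb') as f:
        data = f.read()
    md5_worker = md5()
    md5_worker.update(data)
    if md5_worker.digest() == expected_md5:
        return data
    # digest mismatch: the external file no longer matches what the
    # ATBLOCK recorded, mirroring the warning branch in the hunk above
    return None
```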
@@ -2697,40 +2782,45 @@ def get_channel_unit(self, name=None, group=None, index=None): else: stream = self._tempfile - conv_texts = grp['texts']['conversions'][ch_nr] - channel_texts = grp['texts']['channels'][ch_nr] + channel = grp['channels'][ch_nr] + conversion = grp['channel_conversions'][ch_nr] - if conv_texts and 'unit_addr' in conv_texts: - if not self.memory == 'minimum': - unit = conv_texts['unit_addr'] - else: + if self.memory == 'minimum': + + channel = Channel( + address=channel, + stream=stream, + ) + + if conversion: + conversion = ChannelConversion( + address=conversion, + stream=stream, + ) + + address = ( + conversion and conversion['unit_addr'] + or channel['unit_addr'] + or 0 + ) + + if address: unit = TextBlock( - address=conv_texts['unit_addr'], + address=address, stream=stream, ) - if PYVERSION == 3: - try: - unit = unit['text'].decode('utf-8').strip(' \n\t\0') - except UnicodeDecodeError: - unit = '' - else: - unit = unit['text'].strip(' \n\t\0') - else: - # search for physical unit in channel texts - if 'unit_addr' in channel_texts: - if not self.memory == 'minimum': - unit = channel_texts['unit_addr'] - else: - unit = TextBlock( - address=channel_texts['unit_addr'], - stream=stream, - ) if PYVERSION == 3: unit = unit['text'].decode('utf-8').strip(' \n\t\0') else: unit = unit['text'].strip(' \n\t\0') else: unit = '' + else: + unit = ( + conversion and conversion.unit + or channel.unit + or '' + ) return unit @@ -2778,32 +2868,46 @@ def get_channel_comment(self, name=None, group=None, index=None): grp = self.groups[gp_nr] - channel_texts = grp['texts']['channels'][ch_nr] - if grp['data_location'] == v4c.LOCATION_ORIGINAL_FILE: stream = self._file else: stream = self._tempfile - if 'comment_addr' in channel_texts: - if self.memory == 'minimum': - comment = TextBlock( - address=channel_texts['comment_addr'], + channel = grp['channels'][ch_nr] + + if self.memory == 'minimum': + channel = Channel( + address=channel, + stream=stream, + ) + + address = channel['comment_addr'] + if address: + comment_block = TextBlock( + address=address, stream=stream, ) + comment = ( + comment_block['text'] + .decode('utf-8') + .strip(' \r\n\t\0') + ) + if comment_block['id'] == b'##MD': + match = TX.search(comment) + if match: + comment = match.group('text') + else: + comment = '' else: - comment = channel_texts['comment_addr'] - - if comment['id'] == b'##MD': - comment = comment['text'].decode('utf-8').strip(' \n\t\0') - try: - comment = XML.fromstring(comment).find('TX').text - except: - comment = '' - else: - comment = comment['text'].decode('utf-8') + comment = '' else: - comment = '' + comment = channel.comment + if channel.comment_type == b'##MD': + match = TX.search(comment) + if match: + comment = match.group('text') + else: + comment = '' return comment @@ -2813,7 +2917,8 @@ def get(self, index=None, raster=None, samples_only=False, - data=None): + data=None, + raw=False): """Gets channel samples. 
Channel can be specified in two ways: @@ -2845,6 +2950,11 @@ def get(self, samples_only : bool if *True* return only the channel samples as numpy array; if *False* return a *Signal* object + data : bytes + prevent redundant data read by providing the raw data group samples + raw : bool + return channel samples without appling the conversion rule; default + `False` Returns ------- @@ -2854,18 +2964,74 @@ def get(self, The *Signal* samples are: * numpy recarray for channels that have composition/channel - array address or for channel of type BYTEARRAY, CANOPENDATE, - CANOPENTIME + array address or for channel of type BYTEARRAY, + CANOPENDATE, CANOPENTIME * numpy array for all the rest Raises ------ - MdfError : + MdfException : * if the channel name is not found * if the group index is out of range * if the channel index is out of range + Examples + -------- + >>> from asammdf import MDF, Signal + >>> import numpy as np + >>> t = np.arange(5) + >>> s = np.ones(5) + >>> mdf = MDF(version='4.10') + >>> for i in range(4): + ... sigs = [Signal(s*(i*10+j), t, name='Sig') for j in range(1, 4)] + ... mdf.append(sigs) + ... + >>> # first group and channel index of the specified channel name + ... + >>> mdf.get('Sig') + UserWarning: Multiple occurances for channel "Sig". Using first occurance from data group 4. Provide both "group" and "index" arguments to select another data group + + >>> # first channel index in the specified group + ... + >>> mdf.get('Sig', 1) + + >>> # channel named Sig from group 1 channel index 2 + ... + >>> mdf.get('Sig', 1, 2) + + >>> # channel index 1 or group 2 + ... + >>> mdf.get(None, 2, 1) + + >>> mdf.get(group=2, index=1) + + """ gp_nr, ch_nr = self._validate_channel_selection( name, @@ -2894,11 +3060,15 @@ def get(self, conversion = None if name is None: name = TextBlock( - address=grp['texts']['channels'][ch_nr]['name_addr'], + address=channel['name_addr'], stream=stream, ) - name = name['text'].decode('utf-8').strip(' \r\t\n\0') - name = name.split('\\')[0] + name = ( + name['text'] + .decode('utf-8') + .strip(' \r\t\n\0') + .split('\\')[0] + ) channel.name = name else: channel = grp['channels'][ch_nr] @@ -2931,7 +3101,9 @@ def get(self, arrays = [] name = channel.name - if all(isinstance(dep, Channel) for dep in dependency_list): + if all( + not isinstance(dep, ChannelArrayBlock) + for dep in dependency_list): # structure channel composition if memory == 'minimum': names = [] @@ -2941,21 +3113,20 @@ def get(self, address=address, stream=stream, ) - block = TextBlock( - address=channel['name_addr'], - stream=stream, - ) - name_ = block['text'].decode('utf-8') - name_ = name_.split('\\')[0].strip(' \n\r\t\0') + + name_ = get_text_v4(channel['name_addr'], stream) names.append(name_) else: names = [ch.name for ch in dependency_list] arrays = [ - self.get(name_, samples_only=True) + self.get(name_, samples_only=True, raw=raw) for name_ in names ] - types = [(name_, arr.dtype) for name_, arr in zip(names, arrays)] + types = [ + (name_, arr.dtype) + for name_, arr in zip(names, arrays) + ] if PYVERSION == 2: types = fix_dtype_fields(types) types = dtype(types) @@ -2986,13 +3157,14 @@ def get(self, else: record = grp['record'] + record.setflags(write=False) + vals = record[parent] else: vals = self._get_not_byte_aligned_data(data, grp, ch_nr) dep = dependency_list[0] if dep['flags'] & v4c.FLAG_CA_INVERSE_LAYOUT: - shape = vals.shape shape = (shape[0],) + shape[1:][::-1] vals = vals.reshape(shape) @@ -3006,7 +3178,7 @@ def get(self, dims_nr = ca_block['dims'] if 
ca_block['ca_type'] == v4c.CA_TYPE_SCALE_AXIS: - shape = (ca_block['dim_size_0'], ) + shape = (ca_block['dim_size_0'],) arrays.append(vals) dtype_pair = channel.name, vals.dtype, shape types.append(dtype_pair) @@ -3019,7 +3191,7 @@ def get(self, if ca_block['flags'] & v4c.FLAG_CA_FIXED_AXIS: for i in range(dims_nr): - shape = (ca_block['dim_size_{}'.format(i)], ) + shape = (ca_block['dim_size_{}'.format(i)],) axis = [] for j in range(shape[0]): key = 'axis_{}_value_{}'.format(i, j) @@ -3036,18 +3208,27 @@ def get(self, for i in range(dims_nr): ch_nr, dg_nr = ca_block.referenced_channels[i] if memory == 'minimum': - axisname = self.groups[dg_nr]['texts']['channels'][ch_nr]['name_addr'] - block = TextBlock(address=axisname, - stream=self._file) - axisname = block['text'].decode('utf-8').strip(' \t\n\r\0') - axisname = axisname.split('\\')[0] + channel = Channel( + address=self.groups[dg_nr]['channels'][ch_nr], + stream=stream, + ) + axisname = get_text_v4( + channel['name_addr'], + stream, + ) else: - axisname = self.groups[dg_nr]['channels'][ch_nr].name - shape = (ca_block['dim_size_{}'.format(i)], ) + axisname = ( + self.groups[dg_nr] + ['channels'] + [ch_nr] + .name + ) + shape = (ca_block['dim_size_{}'.format(i)],) axis_values = self.get( group=dg_nr, index=ch_nr, - samples_only=True) + samples_only=True, + ) axis_values = axis_values[axisname] arrays.append(axis_values) dtype_pair = ( @@ -3068,7 +3249,7 @@ def get(self, if ca_block['flags'] & v4c.FLAG_CA_FIXED_AXIS: for i in range(dims_nr): - shape = (ca_block['dim_size_{}'.format(i)], ) + shape = (ca_block['dim_size_{}'.format(i)],) axis = [] for j in range(shape[0]): key = 'axis_{}_value_{}'.format(i, j) @@ -3081,18 +3262,27 @@ def get(self, for i in range(dims_nr): ch_nr, dg_nr = ca_block.referenced_channels[i] if memory == 'minimum': - axisname = self.groups[dg_nr]['texts']['channels'][ch_nr]['name_addr'] - block = TextBlock(address=axisname, - stream=self._file) - axisname = block['text'].decode('utf-8').strip(' \t\n\r\0') - axisname = axisname.split('\\')[0] + channel = Channel( + address=self.groups[dg_nr]['channels'][ch_nr], + stream=stream, + ) + axisname = get_text_v4( + channel['name_addr'], + stream, + ) else: - axisname = self.groups[dg_nr]['channels'][ch_nr].name - shape = (ca_block['dim_size_{}'.format(i)], ) + axisname = ( + self.groups[dg_nr] + ['channels'] + [ch_nr] + .name + ) + shape = (ca_block['dim_size_{}'.format(i)],) axis_values = self.get( group=dg_nr, index=ch_nr, - samples_only=True) + samples_only=True, + ) axis_values = axis_values[axisname] arrays.append(axis_values) dtype_pair = axisname, axis_values.dtype, shape @@ -3107,9 +3297,10 @@ def get(self, if channel['channel_type'] in (v4c.CHANNEL_TYPE_VIRTUAL, v4c.CHANNEL_TYPE_VIRTUAL_MASTER): data_type = channel['data_type'] - ch_dtype = dtype(get_fmt(data_type, 8)) + ch_dtype = dtype(get_fmt_v4(data_type, 8)) vals = arange(cycles_nr, dtype=ch_dtype) + record = None else: try: parent, bit_offset = parents[ch_nr] @@ -3128,13 +3319,19 @@ def get(self, else: record = grp['record'] + record.setflags(write=False) + vals = record[parent] bits = channel['bit_count'] size = vals.dtype.itemsize data_type = channel['data_type'] if vals.dtype.kind not in 'ui' and (bit_offset or not bits == size * 8): - vals = self._get_not_byte_aligned_data(data, grp, ch_nr) + vals = self._get_not_byte_aligned_data( + data, + grp, + ch_nr, + ) else: if bit_offset: dtype_ = vals.dtype @@ -3150,12 +3347,10 @@ def get(self, vals &= mask else: vals = vals & mask + if data_type in 
v4c.SIGNED_INT: - size = vals.dtype.itemsize - mask = (1 << (size * 8)) - 1 - mask = (mask << bits) & mask - vals |= mask - vals = vals.astype('> 3, 1) - ch_fmt = get_fmt(channel['data_type'], size) + ch_fmt = get_fmt_v4(channel['data_type'], size) vals = array(res).astype(ch_fmt) # else FLOAT channel @@ -3394,12 +3594,12 @@ def get(self, else: res.append(default) size = max(bits >> 3, 1) - ch_fmt = get_fmt(channel['data_type'], size) + ch_fmt = get_fmt_v4(channel['data_type'], size) vals = array(res).astype(ch_fmt) elif conversion_type == v4c.CONVERSION_TYPE_TABX: nr = conversion['val_param_nr'] - raw = array( + raw_vals = array( [conversion['val_{}'.format(i)] for i in range(nr)] ) @@ -3408,33 +3608,40 @@ def get(self, [grp['texts']['conversion_tab'][ch_nr]['text_{}'.format(i)]['text'] for i in range(nr)] ) - default = grp['texts']['conversion_tab'][ch_nr]\ - .get('default_addr', {})\ + default = grp['texts']['conversion_tab'][ch_nr] \ + .get('default_addr', {}) \ .get('text', b'') else: phys = [] for i in range(nr): - address=grp['texts']['conversion_tab'][ch_nr]['text_{}'.format(i)] + address = ( + grp['texts'] + ['conversion_tab'] + [ch_nr] + ['text_{}'.format(i)] + ) if address: block = TextBlock( address=address, - stream=self._file, + stream=stream, ) phys.append(block['text']) else: phys.append(b'') phys = array(phys) - if grp['texts']['conversion_tab'][ch_nr].get('default_addr', 0): + if grp['texts']['conversion_tab'][ch_nr].get( + 'default_addr', + 0): block = TextBlock( address=grp['texts']['conversion_tab'][ch_nr]['default_addr'], - stream=self._file, + stream=stream, ) default = block['text'] else: default = b'' info = { - 'raw': raw, + 'raw': raw_vals, 'phys': phys, 'default': default, } @@ -3447,26 +3654,28 @@ def get(self, [grp['texts']['conversion_tab'][ch_nr]['text_{}'.format(i)]['text'] for i in range(nr)] ) - default = grp['texts']['conversion_tab'][ch_nr]\ - .get('default_addr', {})\ + default = grp['texts']['conversion_tab'][ch_nr] \ + .get('default_addr', {}) \ .get('text', b'') else: phys = [] for i in range(nr): - address=grp['texts']['conversion_tab'][ch_nr]['text_{}'.format(i)] + address = grp['texts']['conversion_tab'][ch_nr]['text_{}'.format(i)] if address: block = TextBlock( address=address, - stream=self._file, + stream=stream, ) phys.append(block['text']) else: phys.append(b'') phys = array(phys) - if grp['texts']['conversion_tab'][ch_nr].get('default_addr', 0): + if grp['texts']['conversion_tab'][ch_nr].get( + 'default_addr', + 0): block = TextBlock( address=grp['texts']['conversion_tab'][ch_nr]['default_addr'], - stream=self._file, + stream=stream, ) default = block['text'] else: @@ -3489,16 +3698,16 @@ def get(self, nr = conversion['val_param_nr'] - 1 if memory == 'minimum': - raw = [] + raw_vals = [] for i in range(nr): block = TextBlock( address=grp['texts']['conversion_tab'][ch_nr]['text_{}'.format(i)], stream=stream, ) - raw.append(block['text']) - raw = array(raw) + raw_vals.append(block['text']) + raw_vals = array(raw_vals) else: - raw = array( + raw_vals = array( [grp['texts']['conversion_tab'][ch_nr]['text_{}'.format(i)]['text'] for i in range(nr)] ) @@ -3507,7 +3716,7 @@ def get(self, ) default = conversion['val_default'] info = { - 'raw': raw, + 'raw': raw_vals, 'phys': phys, 'default': default, } @@ -3568,12 +3777,16 @@ def get(self, valid_index = None if grp['channel_group']['invalidation_bytes_nr']: - if channel['flags'] & (v4c.FLAG_INVALIDATION_BIT_VALID | v4c.FLAG_ALL_SAMPLES_VALID) == v4c.FLAG_INVALIDATION_BIT_VALID: - + if 
channel['flags'] & ( + v4c.FLAG_INVALIDATION_BIT_VALID | v4c.FLAG_ALL_SAMPLES_VALID) == v4c.FLAG_INVALIDATION_BIT_VALID: ch_invalidation_pos = channel['pos_invalidation_bit'] - pos_byte, pos_offset = divmod(ch_invalidation_pos) + pos_byte, pos_offset = divmod(ch_invalidation_pos, 8) mask = 1 << pos_offset + if record is None: + record = fromstring(data, dtype=dtypes) + record.setflags(write=False) + inval_bytes = record['invalidation_bytes'] inval_index = array( [bytes_[pos_byte] & mask for bytes_ in inval_bytes] @@ -3585,61 +3798,48 @@ def get(self, res = vals else: # search for unit in conversion texts - conv_texts = grp['texts']['conversions'][ch_nr] - channel_texts = grp['texts']['channels'][ch_nr] - if conv_texts and 'unit_addr' in conv_texts: - if not memory == 'minimum': - unit = conv_texts['unit_addr'] - else: - unit = TextBlock( - address=conv_texts['unit_addr'], + if memory == 'minimum': + + address = ( + conversion and conversion['unit_addr'] + or channel['unit_addr'] + or 0 + ) + if address: + unit = get_text_v4( + address=address, stream=stream, ) - if PYVERSION == 3: - try: - unit = unit['text'].decode('utf-8').strip(' \n\t\0') - except UnicodeDecodeError: - unit = '' - else: - unit = unit['text'].strip(' \n\t\0') - else: - # search for physical unit in channel texts - if 'unit_addr' in channel_texts: - if not memory == 'minimum': - unit = channel_texts['unit_addr'] - else: - unit = TextBlock( - address=channel_texts['unit_addr'], - stream=stream, - ) - if PYVERSION == 3: - unit = unit['text'].decode('utf-8').strip(' \n\t\0') - else: - unit = unit['text'].strip(' \n\t\0') else: unit = '' - # get the channel commment if available - if 'comment_addr' in channel_texts: - if memory == 'minimum': - comment = TextBlock( - address=channel_texts['comment_addr'], + address = channel['comment_addr'] + if address: + comment = get_text_v4( + address=address, stream=stream, ) - else: - comment = channel_texts['comment_addr'] - if comment['id'] == b'##MD': - comment = comment['text'].decode('utf-8').strip(' \n\t\0') - try: - comment = XML.fromstring(comment).find('TX').text - except: - comment = '' + if channel.comment_type == b'##MD': + match = TX.search(comment) + if match: + comment = match.group('text') + else: + comment = '' else: - comment = comment['text'].decode('utf-8') + comment = '' else: - comment = '' + unit = ( + conversion and conversion.unit + or channel.unit + or '' + ) + comment = channel.comment + if channel.comment_type == b'##MD': + match = TX.search(comment) + if match: + comment = match.group('text') t = self.get_master(gp_nr, data) @@ -3742,6 +3942,8 @@ def get_master(self, index, data=None): if memory == 'full': group['record'] = record + + record.setflags(write=False) t = record[parent] else: t = self._get_not_byte_aligned_data( @@ -3771,8 +3973,8 @@ def info(self): """ info = {} - info['version'] = self.identification['version_str']\ - .decode('utf-8')\ + info['version'] = self.identification['version_str'] \ + .decode('utf-8') \ .strip(' \n\t\0') info['groups'] = len(self.groups) for i, gp in enumerate(self.groups): @@ -3791,17 +3993,23 @@ def info(self): stream=stream, ) name = TextBlock( - address=gp['texts']['channels'][j]['name_addr'], + address=channel['name_addr'], stream=stream, ) - name = name['text'].decode('utf-8').strip(' \r\t\n\0') - name = name.split('\\')[0] - channel.name = name + name = ( + name['text'] + .decode('utf-8') + .strip(' \r\t\n\0') + .split('\\')[0] + ) else: name = channel.name ch_type = 
v4c.CHANNEL_TYPE_TO_DESCRIPTION[channel['channel_type']] - inf['channel {}'.format(j)] = 'name="{}" type={}'.format(name, ch_type) + inf['channel {}'.format(j)] = 'name="{}" type={}'.format( + name, + ch_type, + ) return info @@ -3826,19 +4034,36 @@ def save(self, dst='', overwrite=None, compression=0): * 2 - transposition + deflate (slowest, but produces the smallest files) + Returns + ------- + output_file : str + output file name + """ if overwrite is None: overwrite = self._overwrite + output_file = '' if self.name is None and dst == '': - message = ('Must specify a destination file name ' - 'for MDF created from scratch') + message = ( + 'Must specify a destination file name ' + 'for MDF created from scratch' + ) raise MdfException(message) else: if self.memory == 'minimum': - self._save_without_metadata(dst, overwrite, compression) + output_file = self._save_without_metadata( + dst, + overwrite, + compression, + ) else: - self._save_with_metadata(dst, overwrite, compression) + output_file = self._save_with_metadata( + dst, + overwrite, + compression, + ) + return output_file def _save_with_metadata(self, dst, overwrite, compression): """Save MDF to *dst*. If *dst* is not provided the destination file @@ -3868,6 +4093,8 @@ def _save_with_metadata(self, dst, overwrite, compression): raise MdfException(message) dst = dst if dst else self.name + if not dst.endswith(('mf4', 'MF4')): + dst = dst + '.mf4' if overwrite is False: if os.path.isfile(dst): cntr = 0 @@ -3877,8 +4104,10 @@ break else: cntr += 1 - message = ('Destination file "{}" already exists ' - 'and "overwrite" is False. Saving MDF file as "{}"') + message = ( + 'Destination file "{}" already exists ' + 'and "overwrite" is False. 
Saving MDF file as "{}"' + ) message = message.format(dst, name) warnings.warn(message) dst = name @@ -3936,8 +4165,11 @@ def _save_with_metadata(self, dst, overwrite, compression): samples_size = gp['channel_group']['samples_byte_nr'] split_size = MDF4._split_threshold // samples_size split_size *= samples_size - chunks = len(data) / split_size - chunks = ceil(chunks) + if split_size == 0: + chunks = 1 + else: + chunks = len(data) / split_size + chunks = int(ceil(chunks)) else: chunks = 1 @@ -3959,7 +4191,7 @@ def _save_with_metadata(self, dst, overwrite, compression): align = data_block['block_len'] % 8 if align: - write(b'\0' * (8-align)) + write(b'\0' * (8 - align)) if gp['channel_group']['cycles_nr']: gp['data_group']['data_block_addr'] = address @@ -3982,7 +4214,7 @@ def _save_with_metadata(self, dst, overwrite, compression): data_blocks = [] for i in range(chunks): - data_ = data[i*split_size: (i+1)*split_size] + data_ = data[i * split_size: (i + 1) * split_size] if compression and self.version != '4.00': if compression == 1: zip_type = v4c.FLAG_DZ_DEFLATE @@ -4008,7 +4240,7 @@ def _save_with_metadata(self, dst, overwrite, compression): align = block['block_len'] % 8 if align: - write(b'\0' * (8-align)) + write(b'\0' * (8 - align)) dl_block['data_block_addr{}'.format(i)] = address address = tell() @@ -4052,7 +4284,7 @@ def _save_with_metadata(self, dst, overwrite, compression): address += at_block['block_len'] for i, (at_block, text) in enumerate(self.attachments[:-1]): - at_block['next_at_addr'] = self.attachments[i+1][0].address + at_block['next_at_addr'] = self.attachments[i + 1][0].address self.attachments[-1][0]['next_at_addr'] = 0 # file history blocks @@ -4069,11 +4301,14 @@ def _save_with_metadata(self, dst, overwrite, compression): blocks.append(fh) for i, (fh, fh_text) in enumerate(self.file_history[:-1]): - fh['next_fh_addr'] = self.file_history[i+1][0].address + fh['next_fh_addr'] = self.file_history[i + 1][0].address self.file_history[-1][0]['next_fh_addr'] = 0 # data groups + gp_rec_ids = [] for gp in self.groups: + gp_rec_ids.append(gp['data_group']['record_id_len']) + gp['data_group']['record_id_len'] = 0 gp['data_group'].address = address address += gp['data_group']['block_len'] blocks.append(gp['data_group']) @@ -4081,7 +4316,7 @@ def _save_with_metadata(self, dst, overwrite, compression): gp['data_group']['comment_addr'] = 0 for i, dg in enumerate(self.groups[:-1]): - addr_ = self.groups[i+1]['data_group'].address + addr_ = self.groups[i + 1]['data_group'].address dg['data_group']['next_dg_addr'] = addr_ self.groups[-1]['data_group']['next_dg_addr'] = 0 @@ -4092,6 +4327,8 @@ def _save_with_metadata(self, dst, overwrite, compression): v4c.CONVERSION_TYPE_TRANS, ) + si_map = {} + # go through each data group and append the rest of the blocks for i, gp in enumerate(self.groups): # write TXBLOCK's @@ -4110,20 +4347,166 @@ def _save_with_metadata(self, dst, overwrite, compression): address += tx_block['block_len'] blocks.append(tx_block) + for channel in gp['channels']: + if channel.name: + tx_block = TextBlock(text=channel.name) + text = tx_block['text'] + if text in defined_texts: + channel['name_addr'] = defined_texts[text] + else: + channel['name_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + channel['name_addr'] = 0 + + if channel.unit: + tx_block = TextBlock(text=channel.unit) + text = tx_block['text'] + if text in defined_texts: + channel['unit_addr'] = 
defined_texts[text] + else: + channel['unit_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + channel['unit_addr'] = 0 + + if channel.comment: + meta = channel.comment_type == b'##MD' + tx_block = TextBlock(text=channel.comment, meta=meta) + text = tx_block['text'] + if text in defined_texts: + channel['comment_addr'] = defined_texts[text] + else: + channel['comment_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + channel['comment_addr'] = 0 + + for source in gp['channel_sources']: + if source: + if source.name: + tx_block = TextBlock(text=source.name) + text = tx_block['text'] + if text in defined_texts: + source['name_addr'] = defined_texts[text] + else: + source['name_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + source['name_addr'] = 0 + + if source.path: + tx_block = TextBlock(text=source.path) + text = tx_block['text'] + if text in defined_texts: + source['path_addr'] = defined_texts[text] + else: + source['path_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + source['path_addr'] = 0 + + if source.comment: + tx_block = TextBlock(text=source.comment) + text = tx_block['text'] + if text in defined_texts: + source['comment_addr'] = defined_texts[text] + else: + source['comment_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + source['comment_addr'] = 0 + + for conversion in gp['channel_conversions']: + if conversion: + if conversion.name: + tx_block = TextBlock(text=conversion.name) + text = tx_block['text'] + if text in defined_texts: + conversion['name_addr'] = defined_texts[text] + else: + conversion['name_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + conversion['name_addr'] = 0 + + if conversion.unit: + tx_block = TextBlock(text=conversion.unit) + text = tx_block['text'] + if text in defined_texts: + conversion['unit_addr'] = defined_texts[text] + else: + conversion['unit_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + conversion['unit_addr'] = 0 + + if conversion.comment: + tx_block = TextBlock(text=conversion.comment) + text = tx_block['text'] + if text in defined_texts: + conversion['comment_addr'] = defined_texts[text] + else: + conversion['comment_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + conversion['comment_addr'] = 0 + + if conversion['conversion_type'] == v4c.CONVERSION_TYPE_ALG and conversion.formula: + tx_block = TextBlock(text=conversion.formula) + text = tx_block['text'] + if text in defined_texts: + conversion['formula_addr'] = defined_texts[text] + else: + conversion['formula_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + # channel conversions for j, conv in enumerate(gp['channel_conversions']): if conv: conv.address = address - conv_texts = gp['texts']['conversions'][j] - if 
conv_texts: - for key, text_block in conv_texts.items(): - conv[key] = text_block.address - conv['inv_conv_addr'] = 0 + conv['inv_conv_addr'] = 0 if conv['conversion_type'] in tab_conversion: for key in gp['texts']['conversion_tab'][j]: - conv[key] = gp['texts']['conversion_tab'][j][key].address + conv[key] = ( + gp['texts'] + ['conversion_tab'] + [j] + [key] + .address + ) address += conv['block_len'] blocks.append(conv) @@ -4131,17 +4514,12 @@ def _save_with_metadata(self, dst, overwrite, compression): # channel sources for j, source in enumerate(gp['channel_sources']): if source: - source.address = address - source_texts = gp['texts']['sources'][j] - - for key in ('name_addr', 'path_addr', 'comment_addr'): - if source_texts and key in source_texts: - source[key] = source_texts[key].address - else: - source[key] = 0 - - address += source['block_len'] - blocks.append(source) + source_id = id(source) + if source_id not in si_map: + source.address = address + address += source['block_len'] + blocks.append(source) + si_map[source_id] = 0 # channel data gp_sd = gp['signal_data'] = [] @@ -4155,7 +4533,7 @@ def _save_with_metadata(self, dst, overwrite, compression): align = signal_data['block_len'] % 8 if align % 8: blocks.append(b'\0' * (8 - align)) - address += 8 - align + address += 8 - align gp_sd.append(signal_data) else: gp_sd.append(None) @@ -4170,24 +4548,17 @@ def _save_with_metadata(self, dst, overwrite, compression): address += dep['block_len'] blocks.append(dep) for k, dep in enumerate(dep_list[:-1]): - dep['composition_addr'] = dep_list[k+1].address + dep['composition_addr'] = dep_list[k + 1].address dep_list[-1]['composition_addr'] = 0 # channels - for j, (channel, signal_data) in enumerate(zip(gp['channels'], gp['signal_data'])): + for j, (channel, signal_data) in enumerate( + zip(gp['channels'], gp['signal_data'])): channel.address = address - channel_texts = gp['texts']['channels'][j] address += channel['block_len'] blocks.append(channel) - for key in ('comment_addr', 'unit_addr'): - if key in channel_texts: - channel[key] = channel_texts[key].address - else: - channel[key] = 0 - channel['name_addr'] = channel_texts['name_addr'].address - if not gp['channel_conversions'][j]: channel['conversion_addr'] = 0 else: @@ -4210,9 +4581,25 @@ def _save_with_metadata(self, dst, overwrite, compression): group_channels = gp['channels'] if group_channels: for j, channel in enumerate(group_channels[:-1]): - channel['next_ch_addr'] = group_channels[j+1].address + channel['next_ch_addr'] = group_channels[j + 1].address group_channels[-1]['next_ch_addr'] = 0 + # channel dependecies + j = 0 + while j < len(gp['channels']): + dep_list = gp['channel_dependencies'][j] + if dep_list and all( + isinstance(dep, Channel) for dep in dep_list): + gp['channels'][j]['component_addr'] = dep_list[0].address + gp['channels'][j]['next_ch_addr'] = dep_list[-1]['next_ch_addr'] + dep_list[-1]['next_ch_addr'] = 0 + j += len(dep_list) + + for dep in dep_list: + dep['source_addr'] = 0 + else: + j += 1 + # channel group gp['channel_group'].address = address gp['channel_group']['first_ch_addr'] = gp['channels'][0].address @@ -4244,6 +4631,9 @@ def _save_with_metadata(self, dst, overwrite, compression): for block in blocks: write(bytes(block)) + for gp, rec_id in zip(self.groups, gp_rec_ids): + gp['data_group']['record_id_len'] = rec_id + if self.groups: addr_ = self.groups[0]['data_group'].address self.header['first_dg_addr'] = addr_ @@ -4287,6 +4677,8 @@ def _save_with_metadata(self, dst, overwrite, compression): 
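
Both save paths now guard the data-block chunking against a zero split size, which the old `chunks = ceil(len(data) / split_size)` computation turned into a `ZeroDivisionError` whenever a single record was larger than the split threshold; the result is also forced to `int` for Python 2. Below is a minimal sketch of the corrected arithmetic, with invented sizes (`split_threshold` stands in for `MDF4._split_threshold`):

```python
from math import ceil

def count_chunks(data, samples_byte_nr, split_threshold):
    # Round the split size down to a whole number of records so that no
    # record is cut in half across two data blocks.
    if samples_byte_nr:
        split_size = (split_threshold // samples_byte_nr) * samples_byte_nr
    else:
        split_size = 0
    if split_size == 0:
        # A zero split size (empty records, or one record bigger than the
        # threshold) would otherwise divide by zero: use a single chunk.
        return 1, len(data)
    # float() keeps the ceiling division correct under Python 2 as well
    return int(ceil(len(data) / float(split_size))), split_size

# 10000 bytes of 24-byte records with a 4 KiB threshold -> 3 chunks of 4080
print(count_chunks(b'\0' * 10000, 24, 1 << 12))
```
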
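
The per-group `texts` dictionaries are gone from this write path: channel, source and conversion strings all go through the shared `defined_texts` mapping so that identical names, units and comments produce a single TXBLOCK, and `si_map` plays the same role for whole source-information blocks (keyed by `id(source)` here, and by the block's raw bytes in `_save_without_metadata` below). A self-contained sketch of the interning idiom, with a stub `TextBlock` and an invented starting offset:

```python
class TextBlock(dict):
    # Stand-in for asammdf's TextBlock; the real block has an id, links
    # and 8-byte alignment, so 'block_len' here is only a plausible value.
    def __init__(self, text):
        super(TextBlock, self).__init__()
        self.address = 0
        self['text'] = text
        self['block_len'] = 24 + len(text)

defined_texts = {}   # text -> address of the first TXBLOCK that holds it
blocks = []          # blocks queued to be written at the end
address = 0x1000     # invented running file offset

def intern_text(text):
    """Return an address for *text*, creating a new TXBLOCK only once."""
    global address
    if text in defined_texts:
        return defined_texts[text]
    tx_block = TextBlock(text)
    tx_block.address = defined_texts[text] = address
    address += tx_block['block_len']
    blocks.append(tx_block)
    return tx_block.address

name_addr = intern_text('VehicleSpeed')
unit_addr = intern_text('km/h')
assert intern_text('VehicleSpeed') == name_addr   # second use is shared
```
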
self._file = open(self.name, 'rb') self._read() + return dst + def _save_without_metadata(self, dst, overwrite, compression): """Save MDF to *dst*. If *dst* is not provided the the destination file name is the MDF name. If overwrite is *True* then the destination file @@ -4310,11 +4702,15 @@ def _save_without_metadata(self, dst, overwrite, compression): """ if self.name is None and dst == '': - message = ('Must specify a destination file name ' - 'for MDF created from scratch') + message = ( + 'Must specify a destination file name ' + 'for MDF created from scratch' + ) raise MdfException(message) dst = dst if dst else self.name + if not dst.endswith(('mf4', 'MF4')): + dst = dst + '.mf4' if overwrite is False: if os.path.isfile(dst): cntr = 0 @@ -4324,8 +4720,10 @@ def _save_without_metadata(self, dst, overwrite, compression): break else: cntr += 1 - message = ('Destination file "{}" already exists ' - 'and "overwrite" is False. Saving MDF file as "{}"') + message = ( + 'Destination file "{}" already exists ' + 'and "overwrite" is False. Saving MDF file as "{}"' + ) message = message.format(dst, name) warnings.warn(message) dst = name @@ -4383,8 +4781,11 @@ def _save_without_metadata(self, dst, overwrite, compression): samples_size = gp['channel_group']['samples_byte_nr'] split_size = MDF4._split_threshold // samples_size split_size *= samples_size - chunks = len(data) / split_size - chunks = ceil(chunks) + if split_size == 0: + chunks = 1 + else: + chunks = len(data) / split_size + chunks = int(ceil(chunks)) else: chunks = 1 @@ -4406,7 +4807,7 @@ def _save_without_metadata(self, dst, overwrite, compression): align = data_block['block_len'] % 8 if align: - write(b'\0' * (8-align)) + write(b'\0' * (8 - align)) if gp['channel_group']['cycles_nr']: gp['data_group']['data_block_addr'] = address @@ -4429,7 +4830,7 @@ def _save_without_metadata(self, dst, overwrite, compression): data_blocks = [] for i in range(chunks): - data_ = data[i*split_size: (i+1)*split_size] + data_ = data[i * split_size: (i + 1) * split_size] if compression and self.version != '4.00': if compression == 1: zip_type = v4c.FLAG_DZ_DEFLATE @@ -4455,7 +4856,7 @@ def _save_without_metadata(self, dst, overwrite, compression): align = block['block_len'] % 8 if align: - write(b'\0' * (8-align)) + write(b'\0' * (8 - align)) dl_block['data_block_addr{}'.format(i)] = address address = tell() @@ -4499,7 +4900,7 @@ def _save_without_metadata(self, dst, overwrite, compression): address += at_block['block_len'] for i, (at_block, text) in enumerate(self.attachments[:-1]): - at_block['next_at_addr'] = self.attachments[i+1][0].address + at_block['next_at_addr'] = self.attachments[i + 1][0].address self.attachments[-1][0]['next_at_addr'] = 0 # file history blocks @@ -4516,7 +4917,7 @@ def _save_without_metadata(self, dst, overwrite, compression): blocks.append(fh) for i, (fh, fh_text) in enumerate(self.file_history[:-1]): - fh['next_fh_addr'] = self.file_history[i+1][0].address + fh['next_fh_addr'] = self.file_history[i + 1][0].address self.file_history[-1][0]['next_fh_addr'] = 0 for blk in blocks: @@ -4531,8 +4932,14 @@ def _save_without_metadata(self, dst, overwrite, compression): v4c.CONVERSION_TYPE_TRANS, ) + si_map = {} + # go through each data group and append the rest of the blocks for i, gp in enumerate(self.groups): + gp['temp_channels'] = ch_addrs = [] + gp['temp_channel_conversions'] = cc_addrs = [] + gp['temp_channel_sources'] = si_addrs = [] + if gp['data_location'] == v4c.LOCATION_ORIGINAL_FILE: stream = self._file else: @@ 
-4542,7 +4949,7 @@ def _save_without_metadata(self, dst, overwrite, compression): # write TXBLOCK's for item_list in temp_texts.values(): for dict_ in item_list: - if dict_ is None: + if not dict_: continue for key, tx_block in dict_.items(): # text blocks can be shared @@ -4559,96 +4966,253 @@ def _save_without_metadata(self, dst, overwrite, compression): dict_[key] = address write(bytes(block)) - # channel conversions - gp['temp_channel_conversions'] = [] - for j, conv in enumerate(gp['channel_conversions']): - if conv: - address = tell() - gp['temp_channel_conversions'].append(address) - conv = ChannelConversion( - address=conv, - stream=stream, - ) - - conv.address = address - conv_texts = temp_texts['conversions'][j] + for source in gp['channel_sources']: + if source: + stream.seek(source, v4c.SEEK_START) + raw_bytes = stream.read(v4c.SI_BLOCK_SIZE) + if raw_bytes in si_map: + si_addrs.append(si_map[raw_bytes]) + else: + source = SourceInformation( + raw_bytes=raw_bytes, + ) - if conv_texts: + if source['name_addr']: + tx_block = TextBlock( + address=source['name_addr'], + stream=stream, + ) + text = tx_block['text'] + if text in defined_texts: + source['name_addr'] = defined_texts[text] + else: + address = tell() + source['name_addr'] = address + defined_texts[text] = address + tx_block.address = address + write(bytes(tx_block)) + else: + source['name_addr'] = 0 - for key, text_block in conv_texts.items(): - conv[key] = text_block - conv['inv_conv_addr'] = 0 + if source.path: + tx_block = TextBlock( + address=source['path_addr'], + stream=stream, + ) + text = tx_block['text'] + if text in defined_texts: + source['path_addr'] = defined_texts[text] + else: + address = tell() + source['path_addr'] = address + defined_texts[text] = address + tx_block.address = address + write(bytes(tx_block)) + else: + source['path_addr'] = 0 - if conv['conversion_type'] in tab_conversion: - for key in temp_texts['conversion_tab'][j]: - conv[key] = temp_texts['conversion_tab'][j][key] + if source['comment_addr']: + tx_block = TextBlock( + address=source['comment_addr'], + stream=stream, + ) + text = tx_block['text'] + if text in defined_texts: + source['comment_addr'] = defined_texts[text] + else: + address = tell() + source['comment_addr'] = address + defined_texts[text] = address + tx_block.address = address + write(bytes(tx_block)) + else: + source['comment_addr'] = 0 - write(bytes(conv)) + address = tell() + si_addrs.append(address) + si_map[raw_bytes] = address + write(bytes(source)) else: - gp['temp_channel_conversions'].append(0) + si_addrs.append(0) - # channel sources - gp['temp_channel_sources'] = [] - for j, source in enumerate(gp['channel_sources']): - if source: - address = tell() - gp['temp_channel_sources'].append(address) - source_texts = temp_texts['sources'][j] - - source = SourceInformation( - address=source, + for j, conversion in enumerate(gp['channel_conversions']): + if conversion: + conversion = ChannelConversion( + address=conversion, stream=stream, ) - for key in ('name_addr', 'path_addr', 'comment_addr'): - if source_texts and key in source_texts: - source[key] = source_texts[key] + if conversion['name_addr']: + tx_block = TextBlock( + address=conversion['name_addr'], + stream=stream, + ) + text = tx_block['text'] + if text in defined_texts: + conversion['name_addr'] = defined_texts[text] else: - source[key] = 0 + address = tell() + conversion['name_addr'] = address + defined_texts[text] = address + tx_block.address = address + write(bytes(tx_block)) + else: + 
conversion['name_addr'] = 0 + + if conversion['unit_addr']: + tx_block = TextBlock( + address=conversion['unit_addr'], + stream=stream, + ) + text = tx_block['text'] + if text in defined_texts: + conversion['unit_addr'] = defined_texts[text] + else: + address = tell() + conversion['unit_addr'] = address + defined_texts[text] = address + tx_block.address = address + write(bytes(tx_block)) + else: + conversion['unit_addr'] = 0 + + if conversion['comment_addr']: + tx_block = TextBlock( + address=conversion['comment_addr'], + stream=stream, + ) + text = tx_block['text'] + if text in defined_texts: + conversion['comment_addr'] = defined_texts[text] + else: + address = tell() + conversion['comment_addr'] = address + defined_texts[text] = address + tx_block.address = address + write(bytes(tx_block)) + else: + conversion['comment_addr'] = 0 + + if conversion['conversion_type'] == v4c.CONVERSION_TYPE_ALG and conversion['formula_addr']: + tx_block = TextBlock( + address=conversion['formula_addr'], + stream=stream, + ) + text = tx_block['text'] + if text in defined_texts: + conversion['formula_addr'] = defined_texts[text] + else: + address = tell() + conversion['formula_addr'] = address + defined_texts[text] = address + tx_block.address = address + write(bytes(tx_block)) + + elif conversion['conversion_type'] in tab_conversion: + for key in temp_texts['conversion_tab'][j]: + conversion[key] = temp_texts['conversion_tab'][j][key] - write(bytes(source)) + conversion['inv_conv_addr'] = 0 + + address = tell() + cc_addrs.append(address) + write(bytes(conversion)) else: - gp['temp_channel_sources'].append(0) + cc_addrs.append(0) # channel dependecies + temp_deps = [] for j, dep_list in enumerate(gp['channel_dependencies']): if dep_list: if all(isinstance(dep, ChannelArrayBlock) for dep in dep_list): + temp_deps.append([]) + for dep in dep_list: address = tell() dep.address = address + temp_deps[-1].append(address) write(bytes(dep)) for k, dep in enumerate(dep_list[:-1]): - dep['composition_addr'] = dep_list[k+1].address + dep['composition_addr'] = dep_list[k + 1].address dep_list[-1]['composition_addr'] = 0 + else: + temp_deps.append([]) + for _ in dep_list: + temp_deps[-1].append(0) + else: + temp_deps.append(0) # channels blocks = [] chans = [] - address = tell() - gp['temp_channels'] = ch_addrs = [] + address = blocks_start_addr = tell() + gp['channel_group']['first_ch_addr'] = address + + for j, channel in enumerate(gp['channels']): channel = Channel( - address=channel, - stream=stream, - ) + address=channel, + stream=stream, + ) channel.address = address - channel_texts = temp_texts['channels'][j] - ch_addrs.append(address) + chans.append(channel) + blocks.append(channel) address += channel['block_len'] - blocks.append(channel) - chans.append(channel) - for key in ('comment_addr', 'unit_addr'): - if key in channel_texts: - channel[key] = channel_texts[key] + if channel['name_addr']: + tx_block = TextBlock( + address=channel['name_addr'], + stream=stream, + ) + text = tx_block['text'] + if text in defined_texts: + channel['name_addr'] = defined_texts[text] else: - channel[key] = 0 - channel['name_addr'] = channel_texts['name_addr'] + channel['name_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + channel['name_addr'] = 0 + + if channel['unit_addr']: + tx_block = TextBlock( + address=channel['unit_addr'], + stream=stream, + ) + text = tx_block['text'] + if text in defined_texts: + channel['unit_addr'] = 
defined_texts[text] + else: + channel['unit_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + channel['unit_addr'] = 0 + + if channel['comment_addr']: + tx_block = TextBlock( + address=channel['comment_addr'], + stream=stream, + ) + text = tx_block['text'] + if text in defined_texts: + channel['comment_addr'] = defined_texts[text] + else: + channel['comment_addr'] = address + defined_texts[text] = address + tx_block.address = address + address += tx_block['block_len'] + blocks.append(tx_block) + else: + channel['comment_addr'] = 0 channel['conversion_addr'] = gp['temp_channel_conversions'][j] channel['source_addr'] = gp['temp_channel_sources'][j] @@ -4661,22 +5225,49 @@ def _save_without_metadata(self, dst, overwrite, compression): align = signal_data['block_len'] % 8 if align % 8: blocks.append(b'\0' * (8 - align)) - address += 8 - align + address += 8 - align else: channel['data_block_addr'] = 0 if gp['channel_dependencies'][j]: block = gp['channel_dependencies'][j][0] - if isinstance(block, int): - channel['component_addr'] = block - else: + if isinstance(block, (ChannelArrayBlock, Channel)): channel['component_addr'] = block.address + else: + channel['component_addr'] = block group_channels = gp['channels'] if group_channels: for j, channel in enumerate(chans[:-1]): - channel['next_ch_addr'] = chans[j+1].address + channel['next_ch_addr'] = chans[j + 1].address chans[-1]['next_ch_addr'] = 0 + + # channel dependecies + j = 0 + while j < len(gp['channels']): + dep_list = gp['channel_dependencies'][j] + if dep_list and all( + not isinstance(dep, ChannelArrayBlock) for dep in dep_list): + + dep = chans[j+1] + + channel = chans[j] + channel['component_addr'] = dep.address + + dep = chans[j+len(dep_list)] + channel['next_ch_addr'] = dep['next_ch_addr'] + dep['next_ch_addr'] = 0 + + for k, _ in enumerate(dep_list): + dep = chans[j+1+k] + dep['source_addr'] = 0 + + j += len(dep_list) + else: + j += 1 + + seek(blocks_start_addr, v4c.SEEK_START) + for block in blocks: write(bytes(block)) @@ -4686,7 +5277,6 @@ def _save_without_metadata(self, dst, overwrite, compression): # channel group gp['channel_group'].address = address - # gp['channel_group']['first_ch_addr'] = gp['channels'][0] gp['channel_group']['next_cg_addr'] = 0 cg_texts = temp_texts['channel_group'][0] for key in ('acq_name_addr', 'comment_addr'): @@ -4707,22 +5297,28 @@ def _save_without_metadata(self, dst, overwrite, compression): blocks = [] address = tell() + gp_rec_ids = [] # data groups for gp in self.groups: gp['data_group'].address = address + gp_rec_ids.append(gp['data_group']['record_id_len']) + gp['data_group']['record_id_len'] = 0 address += gp['data_group']['block_len'] blocks.append(gp['data_group']) gp['data_group']['comment_addr'] = 0 for i, dg in enumerate(self.groups[:-1]): - addr_ = self.groups[i+1]['data_group'].address + addr_ = self.groups[i + 1]['data_group'].address dg['data_group']['next_dg_addr'] = addr_ self.groups[-1]['data_group']['next_dg_addr'] = 0 for block in blocks: write(bytes(block)) + for gp, rec_id in zip(self.groups, gp_rec_ids): + gp['data_group']['record_id_len'] = rec_id + if self.groups: addr_ = self.groups[0]['data_group'].address self.header['first_dg_addr'] = addr_ @@ -4745,11 +5341,12 @@ def _save_without_metadata(self, dst, overwrite, compression): for orig_addr, gp in zip(original_data_addresses, self.groups): gp['data_group']['data_block_addr'] = orig_addr - for gp in self.groups: for 
dep_list in gp['channel_dependencies']: if dep_list: - if all(isinstance(dep, ChannelArrayBlock) for dep in dep_list): + if all( + isinstance(dep, ChannelArrayBlock) + for dep in dep_list): for dep in dep_list: for i, (ch_nr, gp_nr) in enumerate(dep.referenced_channels): grp = self.groups[gp_nr] @@ -4784,3 +5381,4 @@ def _save_without_metadata(self, dst, overwrite, compression): self._tempfile = TemporaryFile() self._file = open(self.name, 'rb') self._read() + return dst diff --git a/asammdf/signal.py b/asammdf/signal.py index 0b5adc1f1..63158a405 100644 --- a/asammdf/signal.py +++ b/asammdf/signal.py @@ -283,29 +283,35 @@ def cut(self, start=None, stop=None): # cut from beggining to stop stop = np.searchsorted(self.timestamps, stop, side='right') if stop: - result = Signal(self.samples[: stop], - self.timestamps[:stop], - self.unit, - self.name, - self.info, - self.comment) + result = Signal( + self.samples[: stop], + self.timestamps[:stop], + self.unit, + self.name, + self.info, + self.comment, + ) else: - result = Signal(np.array([]), - np.array([]), - self.unit, - self.name, - self.info, - self.comment) + result = Signal( + np.array([]), + np.array([]), + self.unit, + self.name, + self.info, + self.comment, + ) elif stop is None: # cut from start to end start = np.searchsorted(self.timestamps, start, side='left') - result = Signal(self.samples[start: ], - self.timestamps[start: ], - self.unit, - self.name, - self.info, - self.comment) + result = Signal( + self.samples[start:], + self.timestamps[start:], + self.unit, + self.name, + self.info, + self.comment, + ) else: # cut between start and stop @@ -313,33 +319,39 @@ def cut(self, start=None, stop=None): stop_ = np.searchsorted(self.timestamps, stop, side='right') if stop_ == start_: - if len(self.timestamps) and \ - stop >= self.timestamps[0] and \ - start <= self.timestamps[-1]: + if (len(self.timestamps) + and stop >= self.timestamps[0] + and start <= self.timestamps[-1]): # start and stop are found between 2 signal samples # so return the previous sample - result = Signal(self.samples[start_: start_ + 1], - self.timestamps[start_: start_ + 1], - self.unit, - self.name, - self.info, - self.comment) + result = Signal( + self.samples[start_: start_ + 1], + self.timestamps[start_: start_ + 1], + self.unit, + self.name, + self.info, + self.comment, + ) else: # signal is empty or start and stop are outside the # signal time base - result = Signal(np.array([]), - np.array([]), - self.unit, - self.name, - self.info, - self.comment) + result = Signal( + np.array([]), + np.array([]), + self.unit, + self.name, + self.info, + self.comment, + ) else: - result = Signal(self.samples[start_: stop_], - self.timestamps[start_: stop_], - self.unit, - self.name, - self.info, - self.comment) + result = Signal( + self.samples[start_: stop_], + self.timestamps[start_: stop_], + self.unit, + self.name, + self.info, + self.comment, + ) return result def extend(self, other): @@ -363,12 +375,14 @@ def extend(self, other): else: timestamps = other.timestamps - result = Signal(np.append(self.samples, other.samples), - np.append(self.timestamps, timestamps), - self.unit, - self.name, - self.info, - self.comment) + result = Signal( + np.append(self.samples, other.samples), + np.append(self.timestamps, timestamps), + self.unit, + self.name, + self.info, + self.comment, + ) else: result = self return result @@ -378,9 +392,11 @@ def interp(self, new_timestamps): if self.samples.dtype.kind == 'f': s = np.interp(new_timestamps, self.timestamps, self.samples) else: - 
idx = np.searchsorted(self.timestamps, - new_timestamps, - side='right') + idx = np.searchsorted( + self.timestamps, + new_timestamps, + side='right', + ) idx -= 1 idx = np.clip(idx, 0, idx[-1]) s = self.samples[idx] @@ -401,28 +417,34 @@ def __apply_func(self, other, func_name): func = getattr(self.samples, func_name) s = func(other) time = self.timestamps - return Signal(s, - time, - self.unit, - self.name, - self.info) + return Signal( + s, + time, + self.unit, + self.name, + self.info, + ) def __pos__(self): return self def __neg__(self): - return Signal(np.negative(self.samples), - self.timestamps, - self.unit, - self.name, - self.info) + return Signal( + np.negative(self.samples), + self.timestamps, + self.unit, + self.name, + self.info, + ) def __round__(self, n): - return Signal(np.around(self.samples, n), - self.timestamps, - self.unit, - self.name, - self.info) + return Signal( + np.around(self.samples, n), + self.timestamps, + self.unit, + self.name, + self.info, + ) def __sub__(self, other): return self.__apply_func(other, '__sub__') @@ -478,11 +500,13 @@ def __xor__(self, other): def __invert__(self): s = ~self.samples time = self.timestamps - return Signal(s, - time, - self.unit, - self.name, - self.info) + return Signal( + s, + time, + self.unit, + self.name, + self.info, + ) def __lshift__(self, other): return self.__apply_func(other, '__lshift__') @@ -509,7 +533,12 @@ def __ne__(self, other): return self.__apply_func(other, '__ne__') def __iter__(self): - return zip(self.samples, self.timestamps) + for item in ( + self.samples, + self.timestamps, + self.unit, + self.name): + yield item def __reversed__(self): return enumerate(zip(reversed(self.samples), reversed(self.timestamps))) @@ -518,11 +547,13 @@ def __len__(self): return len(self.samples) def __abs__(self): - return Signal(np.fabs(self.samples), - self.timestamps, - self.unit, - self.name, - self.info) + return Signal( + np.fabs(self.samples), + self.timestamps, + self.unit, + self.name, + self.info, + ) def __getitem__(self, val): return self.samples[val] @@ -532,11 +563,13 @@ def __setitem__(self, idx, val): def astype(self, np_type): """ returns new *Signal* with samples of dtype *np_type*""" - return Signal(self.samples.astype(np_type), - self.timestamps, - self.unit, - self.name, - self.info) + return Signal( + self.samples.astype(np_type), + self.timestamps, + self.unit, + self.name, + self.info, + ) if __name__ == '__main__': diff --git a/asammdf/utils.py b/asammdf/utils.py index 7a5c72fd3..1d071f9cc 100644 --- a/asammdf/utils.py +++ b/asammdf/utils.py @@ -3,24 +3,29 @@ asammdf utility functions and classes ''' -import itertools -import re +import warnings + +from struct import unpack from numpy import ( amin, amax, + where, ) - -from . import v3constants as v3c -from . import v4constants as v4c - +from . import v2_v3_constants as v3c +from . 
import v4_constants as v4c __all__ = [ 'MdfException', - 'get_fmt', - 'fmt_to_datatype', - 'pair', + 'get_fmt_v3', + 'get_fmt_v4', + 'get_min_max', + 'get_unique_name', + 'get_text_v4', + 'fix_dtype_fields', + 'fmt_to_datatype_v3', + 'fmt_to_datatype_v4', 'bytes', ] @@ -29,7 +34,7 @@ class MdfException(Exception): """MDF Exception class""" pass - +# pylint: disable=W0622 def bytes(obj): """ Python 2 compatibility function """ try: @@ -39,57 +44,85 @@ def bytes(obj): return obj else: raise +# pylint: enable=W0622 -def dtype_mapping(invalue, outversion=3): - """ map data types between mdf versions 3 and 4 +def get_text_v3(address, stream): + """ faster way extract string from mdf versions 2 and 3 TextBlock Parameters ---------- - invalue : int - original data type - outversion : int - mdf version of output data type + address : int + TextBlock address + stream : handle + file IO handle Returns ------- - res : int - mapped data type + text : str + unicode string """ - v3tov4 = {v3c.DATA_TYPE_UNSIGNED: v4c.DATA_TYPE_UNSIGNED_INTEL, - v3c.DATA_TYPE_SIGNED: v4c.DATA_TYPE_SIGNED_INTEL, - v3c.DATA_TYPE_FLOAT: v4c.DATA_TYPE_REAL_INTEL, - v3c.DATA_TYPE_DOUBLE: v4c.DATA_TYPE_REAL_INTEL, - v3c.DATA_TYPE_STRING: v4c.DATA_TYPE_STRING_LATIN_1, - v3c.DATA_TYPE_UNSIGNED_INTEL: v4c.DATA_TYPE_UNSIGNED_INTEL, - v3c.DATA_TYPE_UNSIGNED_INTEL: v4c.DATA_TYPE_UNSIGNED_INTEL, - v3c.DATA_TYPE_SIGNED_INTEL: v4c.DATA_TYPE_SIGNED_INTEL, - v3c.DATA_TYPE_SIGNED_INTEL: v4c.DATA_TYPE_SIGNED_INTEL, - v3c.DATA_TYPE_FLOAT_INTEL: v4c.DATA_TYPE_REAL_INTEL, - v3c.DATA_TYPE_FLOAT_INTEL: v4c.DATA_TYPE_REAL_INTEL, - v3c.DATA_TYPE_DOUBLE_INTEL: v4c.DATA_TYPE_REAL_INTEL, - v3c.DATA_TYPE_DOUBLE_INTEL: v4c.DATA_TYPE_REAL_INTEL} - - v4tov3 = {v4c.DATA_TYPE_UNSIGNED_INTEL: v3c.DATA_TYPE_UNSIGNED_INTEL, - v4c.DATA_TYPE_UNSIGNED_MOTOROLA: v3c.DATA_TYPE_UNSIGNED_MOTOROLA, - v4c.DATA_TYPE_SIGNED_INTEL: v3c.DATA_TYPE_SIGNED_INTEL, - v4c.DATA_TYPE_STRING_LATIN_1: v3c.DATA_TYPE_STRING, - v4c.DATA_TYPE_BYTEARRAY: v3c.DATA_TYPE_STRING, - v4c.DATA_TYPE_REAL_INTEL: v3c.DATA_TYPE_DOUBLE_INTEL, - v4c.DATA_TYPE_REAL_MOTOROLA: v3c.DATA_TYPE_DOUBLE_MOTOROLA, - v4c.DATA_TYPE_SIGNED_MOTOROLA: v3c.DATA_TYPE_SIGNED_MOTOROLA} - - if outversion == 3: - res = v4tov3[invalue] - else: - res = v3tov4[invalue] - return res + stream.seek(address + 2) + size = unpack(' (s0,s1), (s1,s2), (s2, s3), ...""" - current, next_ = itertools.tee(iterable) - next(next_, None) - return zip(current, next_) - - def get_unique_name(used_names, name): """ returns a list of unique names @@ -284,72 +371,25 @@ def get_min_max(samples): return min_val, max_val -def load_dbc(dbc): - """ Loads all messages description from DBC +def as_non_byte_sized_signed_int(integer_array, bit_length): + """ + The MDF spec allows values to be encoded as integers that aren't byte-sized. Numpy only knows how to do two's + complement on byte-sized integers (i.e. int16, int32, int64, etc.), so we have to calculate two's complement + ourselves in order to handle signed integers with unconventional lengths. 
Parameters ---------- - dbc : str - DBC file path - + integer_array : np.array + Array of integers to apply two's complement to + bit_length : int + Number of bits to sample from the array Returns ------- - messages : dict - the keys are the message ID's from the dbc - + integer_array : np.array + signed integer array with non-byte-sized two's complement applied """ - pattern = r'(?P^BO_ (.+\n)+)' - - with open(dbc, 'r') as dbc_file: - string = dbc_file.read() - - messages = {} - - for match_ in re.finditer(pattern, string, flags=re.M): - msg = match_.group('msg') - - pattern = r'BO_ (?P\d+) (?P[^ :]+): (?P\d).+' - match = re.search(pattern, msg) - can_id = int(match.group('can_id')) - name = match.group('name') - dlc = int(match.group('dlc')) - - pattern = (r'SG_ (?P[^ ]+) : ' - r'(?P\d{1,2})\|(?P\d{1,2})' - r'@(?P\d)(?P[+-])' - r' \((?P[^,]+),(?P[^)]+)\)' - r' \[(?P[^|]+)\|(?P[^]]+)\]' - r' "(?P[^"]*)"') - - messages[can_id] = { - 'name': name, - 'dlc': dlc, - 'signals': {}, - 'can_id': can_id - } - - signals = messages[can_id]['signals'] - - for match in re.finditer(pattern, msg): - signal_name = match.group('name') - start_bit = int(match.group('start_bit')) - size = int(match.group('size')) - byte_order = match.group('byte_order') - signed = match.group('signed') == '-' - factor = float(match.group('factor')) - offset = float(match.group('offset')) - min_value = float(match.group('min_value')) - max_value = float(match.group('max_value')) - unit = match.group('unit') - signals[signal_name] = {'start_bit': start_bit, - 'size': size, - 'byte_order': byte_order, - 'signed': signed, - 'factor': factor, - 'offset': offset, - 'min_value': min_value, - 'max_value': max_value, - 'unit': unit} - - return messages + truncated_integers = integer_array & ((1 << bit_length) - 1) # Zero out the unwanted bits + return where(truncated_integers >> bit_length - 1, # sign bit as a truth series (True when negative) + (2**bit_length - truncated_integers) * -1, # when negative, do two's complement + truncated_integers) # when positive, return the truncated int diff --git a/asammdf/v3blocks.py b/asammdf/v2_v3_blocks.py similarity index 70% rename from asammdf/v3blocks.py rename to asammdf/v2_v3_blocks.py index 58ddd4cca..db4340213 100644 --- a/asammdf/v3blocks.py +++ b/asammdf/v2_v3_blocks.py @@ -1,21 +1,20 @@ # -*- coding: utf-8 -*- -""" classes that implement the blocks for MDF version 3 """ +""" classes that implement the blocks for MDF versions 2 and 3 """ + +from __future__ import division, print_function -from __future__ import print_function, division import sys import time - -from struct import unpack, pack, unpack_from from getpass import getuser +from struct import pack, unpack, unpack_from -from . import v3constants as v3c - +from . 
import v2_v3_constants as v23c +from .utils import MdfException PYVERSION = sys.version_info[0] PYVERSION_MAJOR = sys.version_info[0] * 10 + sys.version_info[1] -SEEK_START = v3c.SEEK_START -SEEK_END = v3c.SEEK_END - +SEEK_START = v23c.SEEK_START +SEEK_END = v23c.SEEK_END __all__ = [ 'Channel', @@ -116,53 +115,107 @@ class Channel(dict): b'CN' ''' - __slots__ = ['name', 'address'] + __slots__ = ['name', 'address', 'comment', 'display_name'] def __init__(self, **kargs): super(Channel, self).__init__() - self.name = '' + self.name = self.display_name = self.comment = '' try: stream = kargs['stream'] self.address = address = kargs['address'] + stream.seek(address + 2, SEEK_START) + size = unpack('= v23c.CN_LONGNAME_BLOCK_SIZE: + self['long_name_addr'] = kargs.get('long_name_addr', 0) + if self['block_len'] >= v23c.CN_DISPLAYNAME_BLOCK_SIZE: + self['display_name_addr'] = kargs.get('display_name_addr', 0) + self['aditional_byte_offset'] = kargs.get( + 'aditional_byte_offset', + 0, + ) def __bytes__(self): + + block_len = self['block_len'] + if block_len == v23c.CN_DISPLAYNAME_BLOCK_SIZE: + fmt = v23c.FMT_CHANNEL_DISPLAYNAME + keys = v23c.KEYS_CHANNEL_DISPLAYNAME + elif block_len == v23c.CN_LONGNAME_BLOCK_SIZE: + fmt = v23c.FMT_CHANNEL_LONGNAME + keys = v23c.KEYS_CHANNEL_LONGNAME + else: + fmt = v23c.FMT_CHANNEL_SHORT + keys = v23c.KEYS_CHANNEL_SHORT + if PYVERSION_MAJOR >= 36: - result = pack(v3c.FMT_CHANNEL, *self.values()) + result = pack(fmt, *self.values()) else: - result = pack( - v3c.FMT_CHANNEL, - *[self[key] for key in v3c.KEYS_CHANNEL] - ) + result = pack(fmt, *[self[key] for key in keys]) return result def __lt__(self, other): @@ -286,19 +350,32 @@ class ChannelConversion(dict): 0, 100.0 ''' - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(ChannelConversion, self).__init__() - try: - stream = kargs['stream'] - self.address = address = kargs['address'] - stream.seek(address, SEEK_START) - block = stream.read(4) - (self['id'], - self['block_len']) = unpack('<2sH', block) - size = self['block_len'] - block = stream.read(size - 4) + if 'raw_bytes' in kargs or 'stream' in kargs: + try: + self.address = 0 + block = kargs['raw_bytes'] + (self['id'], + self['block_len']) = unpack_from( + '<2sH', + block, + ) + size = self['block_len'] + block = block[4:] + + except KeyError: + stream = kargs['stream'] + self.address = address = kargs['address'] + stream.seek(address, SEEK_START) + block = stream.read(4) + (self['id'], + self['block_len']) = unpack('<2sH', block) + size = self['block_len'] + block = stream.read(size - 4) (self['range_flag'], self['min_phy_value'], @@ -306,54 +383,58 @@ def __init__(self, **kargs): self['unit'], self['conversion_type'], self['ref_param_nr']) = unpack_from( - v3c.FMT_CONVERSION_COMMON_SHORT, + v23c.FMT_CONVERSION_COMMON_SHORT, block, ) conv_type = self['conversion_type'] - if conv_type == v3c.CONVERSION_TYPE_LINEAR: + if conv_type == v23c.CONVERSION_TYPE_LINEAR: (self['b'], self['a']) = unpack_from( '<2d', block, - v3c.CC_COMMON_SHORT_SIZE, + v23c.CC_COMMON_SHORT_SIZE, ) - if not size == v3c.CC_LIN_BLOCK_SIZE: - self['CANapeHiddenExtra'] = block[v3c.CC_LIN_BLOCK_SIZE - 4:] + if not size == v23c.CC_LIN_BLOCK_SIZE: + self['CANapeHiddenExtra'] = block[v23c.CC_LIN_BLOCK_SIZE - 4:] - elif conv_type == v3c.CONVERSION_TYPE_NONE: + elif conv_type == v23c.CONVERSION_TYPE_NONE: pass - elif conv_type == v3c.CONVERSION_TYPE_FORMULA: - self['formula'] = block[v3c.CC_COMMON_SHORT_SIZE:] + elif conv_type == 
v23c.CONVERSION_TYPE_FORMULA: + self['formula'] = block[v23c.CC_COMMON_SHORT_SIZE:] elif conv_type in ( - v3c.CONVERSION_TYPE_TABI, - v3c.CONVERSION_TYPE_TABX): + v23c.CONVERSION_TYPE_TABI, + v23c.CONVERSION_TYPE_TABX): nr = self['ref_param_nr'] values = unpack_from( - '<{}d'.format(2*nr), + '<{}d'.format(2 * nr), block, - v3c.CC_COMMON_SHORT_SIZE, + v23c.CC_COMMON_SHORT_SIZE, ) for i in range(nr): (self['raw_{}'.format(i)], - self['phys_{}'.format(i)]) = values[i*2], values[2*i+1] + self['phys_{}'.format(i)]) = values[i * 2], values[2 * i + 1] elif conv_type in ( - v3c.CONVERSION_TYPE_POLY, - v3c.CONVERSION_TYPE_RAT): + v23c.CONVERSION_TYPE_POLY, + v23c.CONVERSION_TYPE_RAT): (self['P1'], self['P2'], self['P3'], self['P4'], self['P5'], - self['P6']) = unpack_from('<6d', block) + self['P6']) = unpack_from( + '<6d', + block, + v23c.CC_COMMON_SHORT_SIZE, + ) elif conv_type in ( - v3c.CONVERSION_TYPE_EXPO, - v3c.CONVERSION_TYPE_LOGH): + v23c.CONVERSION_TYPE_EXPO, + v23c.CONVERSION_TYPE_LOGH): (self['P1'], self['P2'], self['P3'], @@ -363,80 +444,86 @@ def __init__(self, **kargs): self['P7']) = unpack_from( '<7d', block, - v3c.CC_COMMON_SHORT_SIZE, + v23c.CC_COMMON_SHORT_SIZE, ) - elif conv_type == v3c.CONVERSION_TYPE_VTAB: + elif conv_type == v23c.CONVERSION_TYPE_VTAB: nr = self['ref_param_nr'] values = unpack_from( '<' + 'd32s' * nr, block, - v3c.CC_COMMON_SHORT_SIZE, + v23c.CC_COMMON_SHORT_SIZE, ) for i in range(nr): (self['param_val_{}'.format(i)], - self['text_{}'.format(i)]) = values[i*2], values[2*i+1] + self['text_{}'.format(i)]) = values[i * 2], values[2 * i + 1] - elif conv_type == v3c.CONVERSION_TYPE_VTABR: + elif conv_type == v23c.CONVERSION_TYPE_VTABR: nr = self['ref_param_nr'] values = unpack_from( '<' + '2dI' * nr, block, - v3c.CC_COMMON_SHORT_SIZE, + v23c.CC_COMMON_SHORT_SIZE, ) for i in range(nr): (self['lower_{}'.format(i)], self['upper_{}'.format(i)], - self['text_{}'.format(i)]) = values[i*3], values[3*i+1], values[3*i+2] - except KeyError: + self['text_{}'.format(i)]) = values[i * 3], values[3 * i + 1], values[3 * i + 2] + + if self['id'] != b'CC': + message = 'Expected "CC" block but found "{}"' + raise MdfException(message.format(self['id'])) + + else: + self.address = 0 self['id'] = 'CC'.encode('latin-1') - if kargs['conversion_type'] == v3c.CONVERSION_TYPE_NONE: + if kargs['conversion_type'] == v23c.CONVERSION_TYPE_NONE: self['block_len'] = kargs.get( 'block_len', - v3c.CC_COMMON_BLOCK_SIZE, + v23c.CC_COMMON_BLOCK_SIZE, ) self['range_flag'] = kargs.get('range_flag', 1) self['min_phy_value'] = kargs.get('min_phy_value', 0) self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = v3c.CONVERSION_TYPE_NONE + self['unit'] = kargs.get('unit', ('\0' * 20).encode('latin-1')) + self['conversion_type'] = v23c.CONVERSION_TYPE_NONE self['ref_param_nr'] = kargs.get('ref_param_nr', 0) - elif kargs['conversion_type'] == v3c.CONVERSION_TYPE_LINEAR: + elif kargs['conversion_type'] == v23c.CONVERSION_TYPE_LINEAR: self['block_len'] = kargs.get( 'block_len', - v3c.CC_LIN_BLOCK_SIZE, + v23c.CC_LIN_BLOCK_SIZE, ) self['range_flag'] = kargs.get('range_flag', 1) self['min_phy_value'] = kargs.get('min_phy_value', 0) self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = v3c.CONVERSION_TYPE_LINEAR + self['unit'] = kargs.get('unit', ('\0' * 20).encode('latin-1')) + self['conversion_type'] = v23c.CONVERSION_TYPE_LINEAR 
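
Note the bug fix in the POLY/RAT parsing branch above: the six coefficients are now read with an explicit `unpack_from` offset of `v23c.CC_COMMON_SHORT_SIZE`, whereas the old code let the offset default to 0 and read the common conversion fields as if they were the parameters. A small demonstration of why the third argument matters (the payload below is invented; 42 matches the `CC_COMMON_SHORT_SIZE` constant):

```python
from struct import pack, unpack_from

COMMON_SIZE = 42  # size of the common CCBLOCK fields, as in the constants
body = b'\x00' * COMMON_SIZE + pack('<6d', 1, 2, 3, 4, 5, 6)

wrong = unpack_from('<6d', body)               # defaults to offset 0
right = unpack_from('<6d', body, COMMON_SIZE)  # skips the common fields

assert wrong == (0.0,) * 6                     # header bytes, not params
assert right == (1.0, 2.0, 3.0, 4.0, 5.0, 6.0)
```
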
self['ref_param_nr'] = kargs.get('ref_param_nr', 2) self['b'] = kargs.get('b', 0) self['a'] = kargs.get('a', 1) - if not self['block_len'] == v3c.CC_LIN_BLOCK_SIZE: + if not self['block_len'] == v23c.CC_LIN_BLOCK_SIZE: self['CANapeHiddenExtra'] = kargs['CANapeHiddenExtra'] elif kargs['conversion_type'] in ( - v3c.CONVERSION_TYPE_POLY, - v3c.CONVERSION_TYPE_RAT): + v23c.CONVERSION_TYPE_POLY, + v23c.CONVERSION_TYPE_RAT): self['block_len'] = kargs.get( 'block_len', - v3c.CC_POLY_BLOCK_SIZE, + v23c.CC_POLY_BLOCK_SIZE, ) self['range_flag'] = kargs.get('range_flag', 1) self['min_phy_value'] = kargs.get('min_phy_value', 0) self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) + self['unit'] = kargs.get('unit', ('\0' * 20).encode('latin-1')) self['conversion_type'] = kargs.get( 'conversion_type', - v3c.CONVERSION_TYPE_POLY, + v23c.CONVERSION_TYPE_POLY, ) self['ref_param_nr'] = kargs.get('ref_param_nr', 2) self['P1'] = kargs.get('P1', 0) @@ -447,19 +534,19 @@ def __init__(self, **kargs): self['P6'] = kargs.get('P6', 0) elif kargs['conversion_type'] in ( - v3c.CONVERSION_TYPE_EXPO, - v3c.CONVERSION_TYPE_LOGH): + v23c.CONVERSION_TYPE_EXPO, + v23c.CONVERSION_TYPE_LOGH): self['block_len'] = kargs.get( 'block_len', - v3c.CC_EXPO_BLOCK_SIZE, + v23c.CC_EXPO_BLOCK_SIZE, ) self['range_flag'] = kargs.get('range_flag', 1) self['min_phy_value'] = kargs.get('min_phy_value', 0) self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) + self['unit'] = kargs.get('unit', ('\0' * 20).encode('latin-1')) self['conversion_type'] = kargs.get( 'conversion_type', - v3c.CONVERSION_TYPE_EXPO, + v23c.CONVERSION_TYPE_EXPO, ) self['ref_param_nr'] = kargs.get('ref_param_nr', 2) self['P1'] = kargs.get('P1', 0) @@ -470,68 +557,68 @@ def __init__(self, **kargs): self['P6'] = kargs.get('P6', 0) self['P7'] = kargs.get('P7', 0) - elif kargs['conversion_type'] == v3c.CONVERSION_TYPE_FORMULA: + elif kargs['conversion_type'] == v23c.CONVERSION_TYPE_FORMULA: self['block_len'] = kargs.get( 'block_len', - v3c.CC_POLY_BLOCK_SIZE, + v23c.CC_POLY_BLOCK_SIZE, ) self['range_flag'] = kargs.get('range_flag', 1) self['min_phy_value'] = kargs.get('min_phy_value', 0) self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) + self['unit'] = kargs.get('unit', ('\0' * 20).encode('latin-1')) self['conversion_type'] = kargs.get( 'conversion_type', - v3c.CONVERSION_TYPE_FORMULA, + v23c.CONVERSION_TYPE_FORMULA, ) self['ref_param_nr'] = kargs.get('ref_param_nr', 2) - self['formula'] = kargs.get('formula', b'X1'+b'\0'*254) + self['formula'] = kargs.get('formula', b'X1' + b'\0' * 254) elif kargs['conversion_type'] in ( - v3c.CONVERSION_TYPE_TABI, - v3c.CONVERSION_TYPE_TABX): + v23c.CONVERSION_TYPE_TABI, + v23c.CONVERSION_TYPE_TABX): nr = kargs['ref_param_nr'] self['block_len'] = kargs['block_len'] self['range_flag'] = kargs.get('range_flag', 1) self['min_phy_value'] = kargs.get('min_phy_value', 0) self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) + self['unit'] = kargs.get('unit', ('\0' * 20).encode('latin-1')) self['conversion_type'] = kargs.get( 'conversion_type', - v3c.CONVERSION_TYPE_TABI, + v23c.CONVERSION_TYPE_TABI, ) self['ref_param_nr'] = kargs.get('ref_param_nr', 2) for i in range(nr): self['raw_{}'.format(i)] = kargs['raw_{}'.format(i)] self['phys_{}'.format(i)] = kargs['phys_{}'.format(i)] - elif 
kargs['conversion_type'] == v3c.CONVERSION_TYPE_VTAB: + elif kargs['conversion_type'] == v23c.CONVERSION_TYPE_VTAB: nr = kargs['ref_param_nr'] self['block_len'] = kargs.get( 'block_len', - v3c.CC_COMMON_BLOCK_SIZE + 40*nr, + v23c.CC_COMMON_BLOCK_SIZE + 40 * nr, ) self['range_flag'] = kargs.get('range_flag', 0) self['min_phy_value'] = kargs.get('min_phy_value', 0) self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = v3c.CONVERSION_TYPE_VTAB + self['unit'] = kargs.get('unit', ('\0' * 20).encode('latin-1')) + self['conversion_type'] = v23c.CONVERSION_TYPE_VTAB self['ref_param_nr'] = nr for i in range(nr): self['param_val_{}'.format(i)] = kargs['param_val_{}'.format(i)] self['text_{}'.format(i)] = kargs['text_{}'.format(i)] - elif kargs['conversion_type'] == v3c.CONVERSION_TYPE_VTABR: + elif kargs['conversion_type'] == v23c.CONVERSION_TYPE_VTABR: nr = kargs.get('ref_param_nr', 0) self['block_len'] = kargs.get( 'block_len', - v3c.CC_COMMON_BLOCK_SIZE + 20*nr, + v23c.CC_COMMON_BLOCK_SIZE + 20 * nr, ) self['range_flag'] = kargs.get('range_flag', 0) self['min_phy_value'] = kargs.get('min_phy_value', 0) self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = v3c.CONVERSION_TYPE_VTABR + self['unit'] = kargs.get('unit', ('\0' * 20).encode('latin-1')) + self['conversion_type'] = v23c.CONVERSION_TYPE_VTABR self['ref_param_nr'] = kargs.get('ref_param_nr', 0) for i in range(self['ref_param_nr']): @@ -547,58 +634,58 @@ def __bytes__(self): conv = self['conversion_type'] # compute the fmt - if conv == v3c.CONVERSION_TYPE_NONE: - fmt = v3c.FMT_CONVERSION_COMMON - elif conv == v3c.CONVERSION_TYPE_FORMULA: - fmt = v3c.FMT_CONVERSION_FORMULA - elif conv == v3c.CONVERSION_TYPE_LINEAR: - fmt = v3c.FMT_CONVERSION_LINEAR - if not self['block_len'] == v3c.CC_LIN_BLOCK_SIZE: - fmt += '{}s'.format(self['block_len'] - v3c.CC_LIN_BLOCK_SIZE) - elif conv in (v3c.CONVERSION_TYPE_POLY, v3c.CONVERSION_TYPE_RAT): - fmt = v3c.FMT_CONVERSION_POLY_RAT - elif conv in (v3c.CONVERSION_TYPE_EXPO, v3c.CONVERSION_TYPE_LOGH): - fmt = v3c.FMT_CONVERSION_EXPO_LOGH - elif conv in (v3c.CONVERSION_TYPE_TABI, v3c.CONVERSION_TYPE_TABX): + if conv == v23c.CONVERSION_TYPE_NONE: + fmt = v23c.FMT_CONVERSION_COMMON + elif conv == v23c.CONVERSION_TYPE_FORMULA: + fmt = v23c.FMT_CONVERSION_FORMULA + elif conv == v23c.CONVERSION_TYPE_LINEAR: + fmt = v23c.FMT_CONVERSION_LINEAR + if not self['block_len'] == v23c.CC_LIN_BLOCK_SIZE: + fmt += '{}s'.format(self['block_len'] - v23c.CC_LIN_BLOCK_SIZE) + elif conv in (v23c.CONVERSION_TYPE_POLY, v23c.CONVERSION_TYPE_RAT): + fmt = v23c.FMT_CONVERSION_POLY_RAT + elif conv in (v23c.CONVERSION_TYPE_EXPO, v23c.CONVERSION_TYPE_LOGH): + fmt = v23c.FMT_CONVERSION_EXPO_LOGH + elif conv in (v23c.CONVERSION_TYPE_TABI, v23c.CONVERSION_TYPE_TABX): nr = self['ref_param_nr'] - fmt = v3c.FMT_CONVERSION_COMMON + '{}d'.format(nr * 2) - elif conv == v3c.CONVERSION_TYPE_VTABR: + fmt = v23c.FMT_CONVERSION_COMMON + '{}d'.format(nr * 2) + elif conv == v23c.CONVERSION_TYPE_VTABR: nr = self['ref_param_nr'] - fmt = v3c.FMT_CONVERSION_COMMON + '2dI' * nr - elif conv == v3c.CONVERSION_TYPE_VTAB: + fmt = v23c.FMT_CONVERSION_COMMON + '2dI' * nr + elif conv == v23c.CONVERSION_TYPE_VTAB: nr = self['ref_param_nr'] - fmt = v3c.FMT_CONVERSION_COMMON + 'd32s' * nr + fmt = v23c.FMT_CONVERSION_COMMON + 'd32s' * nr # compute the keys only for Python < 3.6 if 
PYVERSION_MAJOR < 36: - if conv == v3c.CONVERSION_TYPE_NONE: - keys = v3c.KEYS_CONVESION_NONE - elif conv == v3c.CONVERSION_TYPE_FORMULA: - keys = v3c.KEYS_CONVESION_FORMULA - elif conv == v3c.CONVERSION_TYPE_LINEAR: - keys = v3c.KEYS_CONVESION_LINEAR - if not self['block_len'] == v3c.CC_LIN_BLOCK_SIZE: + if conv == v23c.CONVERSION_TYPE_NONE: + keys = v23c.KEYS_CONVESION_NONE + elif conv == v23c.CONVERSION_TYPE_FORMULA: + keys = v23c.KEYS_CONVESION_FORMULA + elif conv == v23c.CONVERSION_TYPE_LINEAR: + keys = v23c.KEYS_CONVESION_LINEAR + if not self['block_len'] == v23c.CC_LIN_BLOCK_SIZE: keys += ('CANapeHiddenExtra',) - elif conv in (v3c.CONVERSION_TYPE_POLY, v3c.CONVERSION_TYPE_RAT): - keys = v3c.KEYS_CONVESION_POLY_RAT - elif conv in (v3c.CONVERSION_TYPE_EXPO, v3c.CONVERSION_TYPE_LOGH): - keys = v3c.KEYS_CONVESION_EXPO_LOGH - elif conv in (v3c.CONVERSION_TYPE_TABI, v3c.CONVERSION_TYPE_TABX): + elif conv in (v23c.CONVERSION_TYPE_POLY, v23c.CONVERSION_TYPE_RAT): + keys = v23c.KEYS_CONVESION_POLY_RAT + elif conv in (v23c.CONVERSION_TYPE_EXPO, v23c.CONVERSION_TYPE_LOGH): + keys = v23c.KEYS_CONVESION_EXPO_LOGH + elif conv in (v23c.CONVERSION_TYPE_TABI, v23c.CONVERSION_TYPE_TABX): nr = self['ref_param_nr'] - keys = list(v3c.KEYS_CONVESION_NONE) + keys = list(v23c.KEYS_CONVESION_NONE) for i in range(nr): keys.append('raw_{}'.format(i)) keys.append('phys_{}'.format(i)) - elif conv == v3c.CONVERSION_TYPE_VTABR: + elif conv == v23c.CONVERSION_TYPE_VTABR: nr = self['ref_param_nr'] - keys = list(v3c.KEYS_CONVESION_NONE) + keys = list(v23c.KEYS_CONVESION_NONE) for i in range(nr): keys.append('lower_{}'.format(i)) keys.append('upper_{}'.format(i)) keys.append('text_{}'.format(i)) - elif conv == v3c.CONVERSION_TYPE_VTAB: + elif conv == v23c.CONVERSION_TYPE_VTAB: nr = self['ref_param_nr'] - keys = list(v3c.KEYS_CONVESION_NONE) + keys = list(v23c.KEYS_CONVESION_NONE) for i in range(nr): keys.append('param_val_{}'.format(i)) keys.append('text_{}'.format(i)) @@ -650,6 +737,7 @@ class ChannelDependency(dict): ''' __slots__ = ['address', 'referenced_channels'] + def __init__(self, **kargs): super(ChannelDependency, self).__init__() @@ -672,9 +760,9 @@ def __init__(self, **kargs): ) for i in range(self['sd_nr']): - self['dg_{}'.format(i)] = links[3*i] - self['cg_{}'.format(i)] = links[3*i+1] - self['ch_{}'.format(i)] = links[3*i+2] + self['dg_{}'.format(i)] = links[3 * i] + self['cg_{}'.format(i)] = links[3 * i + 1] + self['ch_{}'.format(i)] = links[3 * i + 2] optional_dims_nr = (self['block_len'] - 8 - links_size) // 2 if optional_dims_nr: @@ -685,6 +773,10 @@ def __init__(self, **kargs): for i, dim in enumerate(dims): self['dim_{}'.format(i)] = dim + if self['id'] != b'CD': + message = 'Expected "CD" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: sd_nr = kargs['sd_nr'] self['id'] = b'CD' @@ -773,38 +865,76 @@ class ChannelExtension(dict): block address inside mdf file ''' - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(ChannelExtension, self).__init__() - try: + if 'raw_bytes' in kargs: + + (self['id'], + self['block_len'], + self['type']) = unpack_from( + v23c.FMT_SOURCE_COMMON, + kargs['raw_bytes'], + ) + if self['type'] == v23c.SOURCE_ECU: + (self['module_nr'], + self['module_address'], + self['description'], + self['ECU_identification'], + self['reserved0']) = unpack_from( + v23c.FMT_SOURCE_EXTRA_ECU, + kargs['raw_bytes'], + 6, + ) + elif self['type'] == v23c.SOURCE_VECTOR: + (self['CAN_id'], + self['CAN_ch_index'], + 
self['message_name'], + self['sender_name'], + self['reserved0']) = unpack_from( + v23c.FMT_SOURCE_EXTRA_VECTOR, + kargs['raw_bytes'], + 6, + ) + + if self['id'] != b'CE': + message = 'Expected "CE" block but found "{}"' + raise MdfException(message.format(self['id'])) + + elif 'stream' in kargs: stream = kargs['stream'] self.address = address = kargs['address'] stream.seek(address, SEEK_START) (self['id'], self['block_len'], - self['type']) = unpack(v3c.FMT_SOURCE_COMMON, stream.read(6)) + self['type']) = unpack(v23c.FMT_SOURCE_COMMON, stream.read(6)) block = stream.read(self['block_len'] - 6) - if self['type'] == v3c.SOURCE_ECU: + if self['type'] == v23c.SOURCE_ECU: (self['module_nr'], self['module_address'], self['description'], self['ECU_identification'], - self['reserved0']) = unpack(v3c.FMT_SOURCE_EXTRA_ECU, block) - elif self['type'] == v3c.SOURCE_VECTOR: + self['reserved0']) = unpack(v23c.FMT_SOURCE_EXTRA_ECU, block) + elif self['type'] == v23c.SOURCE_VECTOR: (self['CAN_id'], self['CAN_ch_index'], self['message_name'], self['sender_name'], - self['reserved0']) = unpack(v3c.FMT_SOURCE_EXTRA_VECTOR, block) - except KeyError: + self['reserved0']) = unpack(v23c.FMT_SOURCE_EXTRA_VECTOR, block) + if self['id'] != b'CE': + message = 'Expected "CE" block but found "{}"' + raise MdfException(message.format(self['id'])) + + else: self.address = 0 - self['id'] = kargs.get('id', 'CE'.encode('latin-1')) - self['block_len'] = kargs.get('block_len', v3c.CE_BLOCK_SIZE) + self['id'] = b'CE' + self['block_len'] = kargs.get('block_len', v23c.CE_BLOCK_SIZE) self['type'] = kargs.get('type', 2) - if self['type'] == v3c.SOURCE_ECU: + if self['type'] == v23c.SOURCE_ECU: self['module_nr'] = kargs.get('module_nr', 0) self['module_address'] = kargs.get('module_address', 0) self['description'] = kargs.get('description', b'\0') @@ -813,7 +943,7 @@ def __init__(self, **kargs): b'\0', ) self['reserved0'] = kargs.get('reserved0', b'\0') - elif self['type'] == v3c.SOURCE_VECTOR: + elif self['type'] == v23c.SOURCE_VECTOR: self['CAN_id'] = kargs.get('CAN_id', 0) self['CAN_ch_index'] = kargs.get('CAN_ch_index', 0) self['message_name'] = kargs.get('message_name', b'\0') @@ -822,12 +952,12 @@ def __init__(self, **kargs): def __bytes__(self): typ = self['type'] - if typ == v3c.SOURCE_ECU: - fmt = v3c.FMT_SOURCE_ECU - keys = v3c.KEYS_SOURCE_ECU + if typ == v23c.SOURCE_ECU: + fmt = v23c.FMT_SOURCE_ECU + keys = v23c.KEYS_SOURCE_ECU else: - fmt = v3c.FMT_SOURCE_VECTOR - keys = v3c.KEYS_SOURCE_VECTOR + fmt = v23c.FMT_SOURCE_VECTOR + keys = v23c.KEYS_SOURCE_VECTOR if PYVERSION_MAJOR >= 36: result = pack(fmt, *self.values()) @@ -887,7 +1017,8 @@ class ChannelGroup(dict): b'CG' ''' - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(ChannelGroup, self).__init__() @@ -896,7 +1027,7 @@ def __init__(self, **kargs): stream = kargs['stream'] self.address = address = kargs['address'] stream.seek(address, SEEK_START) - block = stream.read(v3c.CG_BLOCK_SIZE) + block = stream.read(v23c.CG_PRE_330_BLOCK_SIZE) (self['id'], self['block_len'], @@ -906,13 +1037,19 @@ def __init__(self, **kargs): self['record_id'], self['ch_nr'], self['samples_byte_nr'], - self['cycles_nr']) = unpack(v3c.FMT_CHANNEL_GROUP, block) - if self['block_len'] == v3c.CG33_BLOCK_SIZE: + self['cycles_nr']) = unpack(v23c.FMT_CHANNEL_GROUP, block) + if self['block_len'] == v23c.CG_POST_330_BLOCK_SIZE: self['sample_reduction_addr'] = unpack('= 36: @@ -935,6 +1072,7 @@ def __bytes__(self): result = pack(fmt, *[self[key] for key in 
keys]) return result + class DataBlock(dict): """Data Block class derived from *dict* @@ -962,7 +1100,8 @@ class DataBlock(dict): binary file stream """ - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(DataBlock, self).__init__() @@ -1018,7 +1157,8 @@ class DataGroup(dict): block address inside mdf file ''' - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(DataGroup, self).__init__() @@ -1026,7 +1166,7 @@ def __init__(self, **kargs): stream = kargs['stream'] self.address = address = kargs['address'] stream.seek(address, SEEK_START) - block = stream.read(v3c.DG31_BLOCK_SIZE) + block = stream.read(v23c.DG_PRE_320_BLOCK_SIZE) (self['id'], self['block_len'], @@ -1035,31 +1175,38 @@ def __init__(self, **kargs): self['trigger_addr'], self['data_block_addr'], self['cg_nr'], - self['record_id_nr']) = unpack(v3c.FMT_DATA_GROUP, block) + self['record_id_nr']) = unpack(v23c.FMT_DATA_GROUP_PRE_320, block) - if self['block_len'] == v3c.DG32_BLOCK_SIZE: + if self['block_len'] == v23c.DG_POST_320_BLOCK_SIZE: self['reserved0'] = stream.read(4) + if self['id'] != b'DG': + message = 'Expected "DG" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: self.address = 0 - self['id'] = kargs.get('id', 'DG'.encode('latin-1')) - self['block_len'] = kargs.get('block_len', v3c. DG32_BLOCK_SIZE) + self['id'] = b'DG' + self['block_len'] = kargs.get( + 'block_len', + v23c.DG_PRE_320_BLOCK_SIZE, + ) self['next_dg_addr'] = kargs.get('next_dg_addr', 0) self['first_cg_addr'] = kargs.get('first_cg_addr', 0) self['trigger_addr'] = kargs.get('comment_addr', 0) self['data_block_addr'] = kargs.get('data_block_addr', 0) self['cg_nr'] = kargs.get('cg_nr', 1) self['record_id_nr'] = kargs.get('record_id_nr', 0) - if self['block_len'] == v3c.DG32_BLOCK_SIZE: + if self['block_len'] == v23c.DG_POST_320_BLOCK_SIZE: self['reserved0'] = b'\0\0\0\0' def __bytes__(self): - if self['block_len'] == v3c.DG32_BLOCK_SIZE: - fmt = v3c.FMT_DATA_GROUP_32 - keys = v3c.KEYS_DATA_GROUP_32 + if self['block_len'] == v23c.DG_POST_320_BLOCK_SIZE: + fmt = v23c.FMT_DATA_GROUP_POST_320 + keys = v23c.KEYS_DATA_GROUP_POST_320 else: - fmt = v3c.FMT_DATA_GROUP - keys = v3c.KEYS_DATA_GROUP + fmt = v23c.FMT_DATA_GROUP_PRE_320 + keys = v23c.KEYS_DATA_GROUP_PRE_320 if PYVERSION_MAJOR >= 36: result = pack(fmt, *self.values()) else: @@ -1103,7 +1250,8 @@ class FileIdentificationBlock(dict): block address inside mdf file; should be 0 always ''' - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(FileIdentificationBlock, self).__init__() @@ -1124,15 +1272,15 @@ def __init__(self, **kargs): self['reserved1'], self['unfinalized_standard_flags'], self['unfinalized_custom_flags']) = unpack( - v3c.ID_FMT, - stream.read(v3c.ID_BLOCK_SIZE), + v23c.ID_FMT, + stream.read(v23c.ID_BLOCK_SIZE), ) except KeyError: version = kargs['version'] self['file_identification'] = 'MDF '.encode('latin-1') self['version_str'] = version.encode('latin-1') + b'\0' * 4 self['program_identification'] = 'Python '.encode('latin-1') - self['byte_order'] = v3c.BYTE_ORDER_INTEL + self['byte_order'] = v23c.BYTE_ORDER_INTEL self['float_format'] = 0 self['mdf_version'] = int(version.replace('.', '')) self['code_page'] = 0 @@ -1143,9 +1291,9 @@ def __init__(self, **kargs): def __bytes__(self): if PYVERSION_MAJOR >= 36: - result = pack(v3c.ID_FMT, *self.values()) + result = pack(v23c.ID_FMT, *self.values()) else: - result = pack(v3c.ID_FMT, 
*[self[key] for key in v3c.ID_KEYS]) + result = pack(v23c.ID_FMT, *[self[key] for key in v23c.ID_KEYS]) return result @@ -1191,7 +1339,8 @@ class HeaderBlock(dict): block address inside mdf file; should be 64 always ''' - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(HeaderBlock, self).__init__() @@ -1213,28 +1362,32 @@ def __init__(self, **kargs): self['organization'], self['project'], self['subject']) = unpack( - v3c.HEADER_COMMON_FMT, - stream.read(v3c.HEADER_COMMON_SIZE), + v23c.HEADER_COMMON_FMT, + stream.read(v23c.HEADER_COMMON_SIZE), ) - if self['block_len'] > v3c.HEADER_COMMON_SIZE: + if self['block_len'] > v23c.HEADER_COMMON_SIZE: (self['abs_time'], self['tz_offset'], self['time_quality'], self['timer_identification']) = unpack( - v3c.HEADER_POST_320_EXTRA_FMT, - stream.read(v3c.HEADER_POST_320_EXTRA_SIZE), + v23c.HEADER_POST_320_EXTRA_FMT, + stream.read(v23c.HEADER_POST_320_EXTRA_SIZE), ) + if self['id'] != b'HD': + message = 'Expected "HD" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: version = kargs.get('version', '3.20') - self['id'] = 'HD'.encode('latin-1') - self['block_len'] = 208 if version in ('3.20', '3.30') else 164 + self['id'] = b'HD' + self['block_len'] = 208 if version >= '3.20' else 164 self['first_dg_addr'] = 0 self['comment_addr'] = 0 self['program_addr'] = 0 self['dg_nr'] = 0 - t1 = time.time() * 10**9 + t1 = time.time() * 10 ** 9 t2 = time.gmtime() self['date'] = '{:\0<10}'.format(time.strftime('%d:%m:%Y', t2)).encode('latin-1') self['time'] = '{:\0<8}'.format(time.strftime('%X', t2)).encode('latin-1') @@ -1243,18 +1396,18 @@ def __init__(self, **kargs): self['project'] = '{:\0<32}'.format('').encode('latin-1') self['subject'] = '{:\0<32}'.format('').encode('latin-1') - if self['block_len'] > v3c.HEADER_COMMON_SIZE: + if self['block_len'] > v23c.HEADER_COMMON_SIZE: self['abs_time'] = int(t1) self['tz_offset'] = 2 self['time_quality'] = 0 self['timer_identification'] = '{:\0<32}'.format('Local PC Reference Time').encode('latin-1') def __bytes__(self): - fmt = v3c.HEADER_COMMON_FMT - keys = v3c.HEADER_COMMON_KEYS - if self['block_len'] > v3c.HEADER_COMMON_SIZE: - fmt += v3c.HEADER_POST_320_EXTRA_FMT - keys += v3c.HEADER_POST_320_EXTRA_KEYS + fmt = v23c.HEADER_COMMON_FMT + keys = v23c.HEADER_COMMON_KEYS + if self['block_len'] > v23c.HEADER_COMMON_SIZE: + fmt += v23c.HEADER_POST_320_EXTRA_FMT + keys += v23c.HEADER_POST_320_EXTRA_KEYS if PYVERSION_MAJOR >= 36: result = pack(fmt, *self.values()) else: @@ -1291,7 +1444,8 @@ class ProgramBlock(dict): block address inside mdf file ''' - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(ProgramBlock, self).__init__() @@ -1304,15 +1458,21 @@ def __init__(self, **kargs): self['block_len']) = unpack('<2sH', stream.read(4)) self['data'] = stream.read(self['block_len'] - 4) + if self['id'] != b'PR': + message = 'Expected "PR" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: - pass + self['id'] = b'PR' + self['block_len'] = len(kargs['data']) + 6 + self['data'] = kargs['data'] def __bytes__(self): - fmt = v3c.FMT_PROGRAM_BLOCK.format(self['block_len']) + fmt = v23c.FMT_PROGRAM_BLOCK.format(self['block_len']) if PYVERSION_MAJOR >= 36: result = pack(fmt, *self.values()) else: - result = pack(fmt, *[self[key] for key in v3c.KEYS_PROGRAM_BLOCK]) + result = pack(fmt, *[self[key] for key in v23c.KEYS_PROGRAM_BLOCK]) return result @@ -1346,7 +1506,8 @@ class 
SampleReduction(dict): block address inside mdf file ''' - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(SampleReduction, self).__init__() @@ -1361,17 +1522,21 @@ def __init__(self, **kargs): self['data_block_addr'], self['cycles_nr'], self['time_interval']) = unpack( - v3c.FMT_SAMPLE_REDUCTION_BLOCK, - stream.read(v3c.SR_BLOCK_SIZE), + v23c.FMT_SAMPLE_REDUCTION_BLOCK, + stream.read(v23c.SR_BLOCK_SIZE), ) + if self['id'] != b'SR': + message = 'Expected "SR" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: pass def __bytes__(self): result = pack( - v3c.FMT_SAMPLE_REDUCTION_BLOCK, - *[self[key] for key in v3c.KEYS_SAMPLE_REDUCTION_BLOCK] + v23c.FMT_SAMPLE_REDUCTION_BLOCK, + *[self[key] for key in v23c.KEYS_SAMPLE_REDUCTION_BLOCK] ) return result @@ -1416,7 +1581,8 @@ class TextBlock(dict): b'VehicleSpeed' ''' - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(TextBlock, self).__init__() try: @@ -1429,6 +1595,10 @@ def __init__(self, **kargs): size = self['block_len'] - 4 self['text'] = stream.read(size) + if self['id'] != b'TX': + message = 'Expected "TX" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: self.address = 0 text = kargs['text'] @@ -1450,11 +1620,11 @@ def __init__(self, **kargs): def __bytes__(self): if PYVERSION_MAJOR >= 36: - result = pack('<2sH{}s'.format(self['block_len']-4), *self.values()) + result = pack('<2sH{}s'.format(self['block_len'] - 4), *self.values()) else: result = pack( - '<2sH{}s'.format(self['block_len']-4), - *[self[key] for key in v3c.KEYS_TEXT_BLOCK] + '<2sH{}s'.format(self['block_len'] - 4), + *[self[key] for key in v23c.KEYS_TEXT_BLOCK] ) return result @@ -1470,7 +1640,7 @@ class TriggerBlock(dict): The keys have the following meaning: - * id - Block type identifier, always "TX" + * id - Block type identifier, always "TR" * block_len - Block size of this block in bytes (entire TRBLOCK) * text_addr - Pointer to trigger comment text (TXBLOCK) (NIL allowed) * trigger_events_nr - Number of trigger events n (0 allowed) @@ -1491,7 +1661,8 @@ class TriggerBlock(dict): block address inside mdf file ''' - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(TriggerBlock, self).__init__() @@ -1511,11 +1682,15 @@ def __init__(self, **kargs): nr = self['trigger_events_nr'] if nr: - values = unpack('<{}d'.format(3*nr), block[10:]) + values = unpack('<{}d'.format(3 * nr), block[10:]) for i in range(nr): (self['trigger_{}_time'.format(i)], self['trigger_{}_pretime'.format(i)], - self['trigger_{}_posttime'.format(i)]) = values[i*3], values[3*i+1], values[3*i+2] + self['trigger_{}_posttime'.format(i)]) = values[i * 3], values[3 * i + 1], values[3 * i + 2] + + if self['id'] != b'TR': + message = 'Expected "TR" block but found "{}"' + raise MdfException(message.format(self['id'])) except KeyError: self['id'] = b'TR' diff --git a/asammdf/v3constants.py b/asammdf/v2_v3_constants.py similarity index 86% rename from asammdf/v3constants.py rename to asammdf/v2_v3_constants.py index 61625b4b1..7675ed11f 100644 --- a/asammdf/v3constants.py +++ b/asammdf/v2_v3_constants.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -""" MDF v3 constants """ +""" MDF v2 and v3 constants """ # byte order BYTE_ORDER_INTEL = 0 @@ -87,12 +87,14 @@ HEADER_POST_320_EXTRA_SIZE = 44 CE_BLOCK_SIZE = 128 FH_BLOCK_SIZE = 56 -DG31_BLOCK_SIZE = 24 -DG32_BLOCK_SIZE = 28 +DG_PRE_320_BLOCK_SIZE = 24 
+DG_POST_320_BLOCK_SIZE = 28 HD_BLOCK_SIZE = 104 -CN_BLOCK_SIZE = 228 -CG_BLOCK_SIZE = 26 -CG33_BLOCK_SIZE = 30 +CN_DISPLAYNAME_BLOCK_SIZE = 228 +CN_SHORT_BLOCK_SIZE = 218 +CN_LONGNAME_BLOCK_SIZE = 222 +CG_PRE_330_BLOCK_SIZE = 26 +CG_POST_330_BLOCK_SIZE = 30 DT_BLOCK_SIZE = 24 CC_COMMON_BLOCK_SIZE = 46 CC_COMMON_SHORT_SIZE = 42 @@ -154,8 +156,8 @@ 'timer_identification', ) -FMT_CHANNEL = '<2sH5IH32s128s4H3d2IH' -KEYS_CHANNEL = ( +FMT_CHANNEL_DISPLAYNAME = '<2sH5IH32s128s4H3d2IH' +KEYS_CHANNEL_DISPLAYNAME = ( 'id', 'block_len', 'next_ch_addr', @@ -178,6 +180,29 @@ 'aditional_byte_offset', ) +FMT_CHANNEL_SHORT = '<2sH5IH32s128s4H3d' +FMT_CHANNEL_LONGNAME = FMT_CHANNEL_SHORT + 'I' +KEYS_CHANNEL_SHORT = ( + 'id', + 'block_len', + 'next_ch_addr', + 'conversion_addr', + 'source_depend_addr', + 'ch_depend_addr', + 'comment_addr', + 'channel_type', + 'short_name', + 'description', + 'start_offset', + 'bit_count', + 'data_type', + 'range_flag', + 'min_raw_value', + 'max_raw_value', + 'sampling_rate', +) +KEYS_CHANNEL_LONGNAME = KEYS_CHANNEL_SHORT + ('long_name_addr',) + FMT_CHANNEL_GROUP = '<2sH3I3HI' KEYS_CHANNEL_GROUP = ( 'id', @@ -191,8 +216,8 @@ 'cycles_nr', ) -FMT_DATA_GROUP_32 = '<2sH4I2H4s' -KEYS_DATA_GROUP_32 = ( +FMT_DATA_GROUP_POST_320 = '<2sH4I2H4s' +KEYS_DATA_GROUP_POST_320 = ( 'id', 'block_len', 'next_dg_addr', @@ -204,8 +229,8 @@ 'reserved0', ) -FMT_DATA_GROUP = '<2sH4I2H' -KEYS_DATA_GROUP = ( +FMT_DATA_GROUP_PRE_320 = '<2sH4I2H' +KEYS_DATA_GROUP_PRE_320 = ( 'id', 'block_len', 'next_dg_addr', diff --git a/asammdf/v2blocks.py b/asammdf/v2blocks.py deleted file mode 100644 index ec51cb909..000000000 --- a/asammdf/v2blocks.py +++ /dev/null @@ -1,1570 +0,0 @@ -# -*- coding: utf-8 -*- -""" -classes that implement the blocks for MDF version 3 -""" - -from __future__ import print_function, division -import sys -import time - -from struct import unpack, pack, unpack_from -from getpass import getuser - -from . import v2constants as v2c - - -PYVERSION = sys.version_info[0] -PYVERSION_MAJOR = sys.version_info[0] * 10 + sys.version_info[1] -SEEK_START = v2c.SEEK_START -SEEK_END = v2c.SEEK_END - - -__all__ = [ - 'Channel', - 'ChannelConversion', - 'ChannelDependency', - 'ChannelExtension', - 'ChannelGroup', - 'DataBlock', - 'DataGroup', - 'FileIdentificationBlock', - 'HeaderBlock', - 'ProgramBlock', - 'SampleReduction', - 'TextBlock', - 'TriggerBlock', -] - - -class Channel(dict): - ''' CNBLOCK class derived from *dict* - - The Channel object can be created in two modes: - - * using the *stream* and *address* keyword parameters - when reading - from file - * using any of the following presented keys - when creating a new Channel - - The keys have the following meaning: - - * id - Block type identifier, always "CN" - * block_len - Block size of this block in bytes (entire CNBLOCK) - * next_ch_addr - Pointer to next channel block (CNBLOCK) of this channel - group (NIL allowed) - * conversion_addr - Pointer to the conversion formula (CCBLOCK) of this - signal (NIL allowed) - * source_depend_addr - Pointer to the source-depending extensions (CEBLOCK) - of this signal (NIL allowed) - * ch_depend_addr - Pointer to the dependency block (CDBLOCK) of this signal - (NIL allowed) - * comment_addr - Pointer to the channel comment (TXBLOCK) of this signal - (NIL allowed) - * channel_type - Channel type - - * 0 = data channel - * 1 = time channel for all signals of this group (in each channel group, - exactly one channel must be defined as time channel). 
- The time stamps recording in a time channel are always relative - to the start time of the measurement defined in HDBLOCK. - - * short_name - Short signal name, i.e. the first 31 characters of the - ASAM-MCD name of the signal (end of text should be indicated by 0) - * description - Signal description (end of text should be indicated by 0) - * start_offset - Start offset in bits to determine the first bit of the - signal in the data record. The start offset N is divided into two parts: - a "Byte offset" (= N div 8) and a "Bit offset" (= N mod 8). - The channel block can define an "additional Byte offset" (see below) - which must be added to the Byte offset. - * bit_count - Number of bits used to encode the value of this signal in a - data record - * data_type - Signal data type - * range_flag - Value range valid flag - * min_raw_value - Minimum signal value that occurred for this signal - (raw value) - * max_raw_value - Maximum signal value that occurred for this signal - (raw value) - * sampling_rate - Sampling rate for a virtual time channel. Unit [s] - * long_name_addr - Pointer to TXBLOCK that contains the ASAM-MCD long signal - name - * display_name_addr - Pointer to TXBLOCK that contains the signal's display - name (NIL allowed) - * aditional_byte_offset - Additional Byte offset of the signal in the data - record (default value: 0). - - Parameters - ---------- - stream : file handle - mdf file handle - address : int - block address inside mdf file - - Attributes - ---------- - name : str - full channel name - address : int - block address inside mdf file - dependencies : list - lsit of channel dependencies - - Examples - -------- - >>> with open('test.mdf', 'rb') as mdf: - ... ch1 = Channel(stream=mdf, address=0xBA52) - >>> ch2 = Channel() - >>> ch1.name - 'VehicleSpeed' - >>> ch1['id'] - b'CN' - - ''' - __slots__ = ['name', 'address'] - - def __init__(self, **kargs): - super(Channel, self).__init__() - - self.name = '' - - try: - stream = kargs['stream'] - self.address = address = kargs['address'] - stream.seek(address + 2, SEEK_START) - size = unpack('= 36: - result = pack(fmt, *self.values()) - else: - result = pack(fmt, *[self[key] for key in keys]) - return result - - def __lt__(self, other): - self_start = self['start_offset'] - other_start = other['start_offset'] - if self_start < other_start: - result = 1 - elif self_start == other_start: - if self['bit_count'] >= other['bit_count']: - result = 1 - else: - result = 0 - else: - result = 0 - return result - - -class ChannelConversion(dict): - ''' CCBLOCK class derived from *dict* - - The ChannelConversion object can be created in two modes: - - * using the *stream* and *address* keyword parameters - when reading - from file - * using any of the following presented keys - when creating a new - ChannelConversion - - The first keys are common for all conversion types, and are followed by - conversion specific keys. 
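# Illustrative sketch, not part of this patch: how the CNBLOCK start offset
# described above splits into a "Byte offset" (= N div 8) and a "Bit offset"
# (= N mod 8). The sample values are hypothetical; the key spellings
# ('start_offset', 'aditional_byte_offset') follow the block keys documented
# above.
start_offset = 43             # bit offset of the signal within the data record
aditional_byte_offset = 2     # optional extra byte offset from the channel block
byte_offset = start_offset // 8 + aditional_byte_offset   # N div 8 (+ extra) -> 7
bit_offset = start_offset % 8                             # N mod 8 -> 3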
The keys have the following meaning: - - * common keys - - * id - Block type identifier, always "CC" - * block_len - Block size of this block in bytes (entire CCBLOCK) - * range_flag - Physical value range valid flag: - * min_phy_value - Minimum physical signal value that occurred for this - signal - * max_phy_value - Maximum physical signal value that occurred for this - signal - * unit - Physical unit (string should be terminated with 0) - * conversion_type - Conversion type (formula identifier) - * ref_param_nr - Size information about additional conversion data - - * specific keys - - * linear conversion - - * b - offset - * a - factor - * CANapeHiddenExtra - sometimes CANape appends extra information; - not compliant with MDF specs - - * ASAM formula conversion - - * formula - ecuation as string - - * polynomial or rational conversion - - * P1 .. P6 - factors - - * exponential or logarithmic conversion - - * P1 .. P7 - factors - - * tabular with or without interpolation (grouped by *n*) - - * raw_{n} - n-th raw integer value (X axis) - * phys_{n} - n-th physical value (Y axis) - - * text table conversion - - * param_val_{n} - n-th integers value (X axis) - * text_{n} - n-th text value (Y axis) - - * text range table conversion - - * lower_{n} - n-th lower raw value - * upper_{n} - n-th upper raw value - * text_{n} - n-th text value - - Parameters - ---------- - stream : file handle - mdf file handle - address : int - block address inside mdf file - - Attributes - ---------- - address : int - block address inside mdf file - - Examples - -------- - >>> with open('test.mdf', 'rb') as mdf: - ... cc1 = ChannelConversion(stream=mdf, address=0xBA52) - >>> cc2 = ChannelConversion(conversion_type=0) - >>> cc1['b'], cc1['a'] - 0, 100.0 - - ''' - __slots__ = ['address',] - def __init__(self, **kargs): - super(ChannelConversion, self).__init__() - - try: - stream = kargs['stream'] - self.address = address = kargs['address'] - stream.seek(address, SEEK_START) - block = stream.read(4) - (self['id'], - self['block_len']) = unpack('<2sH', block) - size = self['block_len'] - block = stream.read(size - 4) - - (self['range_flag'], - self['min_phy_value'], - self['max_phy_value'], - self['unit'], - self['conversion_type'], - self['ref_param_nr']) = unpack_from( - v2c.FMT_CONVERSION_COMMON_SHORT, - block, - ) - - conv_type = self['conversion_type'] - - if conv_type == v2c.CONVERSION_TYPE_LINEAR: - (self['b'], - self['a']) = unpack_from( - '<2d', - block, - v2c.CC_COMMON_SHORT_SIZE, - ) - if not size == v2c.CC_LIN_BLOCK_SIZE: - self['CANapeHiddenExtra'] = block[v2c.CC_LIN_BLOCK_SIZE - 4:] - - elif conv_type == v2c.CONVERSION_TYPE_NONE: - pass - - elif conv_type == v2c.CONVERSION_TYPE_FORMULA: - self['formula'] = block[v2c.CC_COMMON_SHORT_SIZE:] - - elif conv_type in ( - v2c.CONVERSION_TYPE_TABI, - v2c.CONVERSION_TYPE_TABX): - nr = self['ref_param_nr'] - values = unpack_from( - '<{}d'.format(2*nr), - block, - v2c.CC_COMMON_SHORT_SIZE, - ) - for i in range(nr): - (self['raw_{}'.format(i)], - self['phys_{}'.format(i)]) = values[i*2], values[2*i+1] - - elif conv_type in ( - v2c.CONVERSION_TYPE_POLY, - v2c.CONVERSION_TYPE_RAT): - (self['P1'], - self['P2'], - self['P3'], - self['P4'], - self['P5'], - self['P6']) = unpack_from('<6d', block) - - elif conv_type in ( - v2c.CONVERSION_TYPE_EXPO, - v2c.CONVERSION_TYPE_LOGH): - (self['P1'], - self['P2'], - self['P3'], - self['P4'], - self['P5'], - self['P6'], - self['P7']) = unpack_from( - '<7d', - block, - v2c.CC_COMMON_SHORT_SIZE, - ) - - elif conv_type == 
v2c.CONVERSION_TYPE_VTAB: - nr = self['ref_param_nr'] - - values = unpack_from( - '<' + 'd32s' * nr, - block, - v2c.CC_COMMON_SHORT_SIZE, - ) - - for i in range(nr): - (self['param_val_{}'.format(i)], - self['text_{}'.format(i)]) = values[i*2], values[2*i+1] - - elif conv_type == v2c.CONVERSION_TYPE_VTABR: - nr = self['ref_param_nr'] - - values = unpack_from( - '<' + '2dI' * nr, - block, - v2c.CC_COMMON_SHORT_SIZE, - ) - for i in range(nr): - (self['lower_{}'.format(i)], - self['upper_{}'.format(i)], - self['text_{}'.format(i)]) = values[i*3], values[3*i+1], values[3*i+2] - except KeyError: - self.address = 0 - self['id'] = 'CC'.encode('latin-1') - - if kargs['conversion_type'] == v2c.CONVERSION_TYPE_NONE: - self['block_len'] = kargs.get( - 'block_len', - v2c.CC_COMMON_BLOCK_SIZE, - ) - self['range_flag'] = kargs.get('range_flag', 1) - self['min_phy_value'] = kargs.get('min_phy_value', 0) - self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = v2c.CONVERSION_TYPE_NONE - self['ref_param_nr'] = kargs.get('ref_param_nr', 0) - - elif kargs['conversion_type'] == v2c.CONVERSION_TYPE_LINEAR: - self['block_len'] = kargs.get( - 'block_len', - v2c.CC_LIN_BLOCK_SIZE, - ) - self['range_flag'] = kargs.get('range_flag', 1) - self['min_phy_value'] = kargs.get('min_phy_value', 0) - self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = v2c.CONVERSION_TYPE_LINEAR - self['ref_param_nr'] = kargs.get('ref_param_nr', 2) - self['b'] = kargs.get('b', 0) - self['a'] = kargs.get('a', 1) - if not self['block_len'] == v2c.CC_LIN_BLOCK_SIZE: - self['CANapeHiddenExtra'] = kargs['CANapeHiddenExtra'] - - elif kargs['conversion_type'] in ( - v2c.CONVERSION_TYPE_POLY, - v2c.CONVERSION_TYPE_RAT): - self['block_len'] = kargs.get( - 'block_len', - v2c.CC_POLY_BLOCK_SIZE, - ) - self['range_flag'] = kargs.get('range_flag', 1) - self['min_phy_value'] = kargs.get('min_phy_value', 0) - self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = kargs.get( - 'conversion_type', - v2c.CONVERSION_TYPE_POLY, - ) - self['ref_param_nr'] = kargs.get('ref_param_nr', 2) - self['P1'] = kargs.get('P1', 0) - self['P2'] = kargs.get('P2', 0) - self['P3'] = kargs.get('P3', 0) - self['P4'] = kargs.get('P4', 0) - self['P5'] = kargs.get('P5', 0) - self['P6'] = kargs.get('P6', 0) - - elif kargs['conversion_type'] in ( - v2c.CONVERSION_TYPE_EXPO, - v2c.CONVERSION_TYPE_LOGH): - self['block_len'] = kargs.get( - 'block_len', - v2c.CC_EXPO_BLOCK_SIZE, - ) - self['range_flag'] = kargs.get('range_flag', 1) - self['min_phy_value'] = kargs.get('min_phy_value', 0) - self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = kargs.get( - 'conversion_type', - v2c.CONVERSION_TYPE_EXPO, - ) - self['ref_param_nr'] = kargs.get('ref_param_nr', 2) - self['P1'] = kargs.get('P1', 0) - self['P2'] = kargs.get('P2', 0) - self['P3'] = kargs.get('P3', 0) - self['P4'] = kargs.get('P4', 0) - self['P5'] = kargs.get('P5', 0) - self['P6'] = kargs.get('P6', 0) - self['P7'] = kargs.get('P7', 0) - - elif kargs['conversion_type'] == v2c.CONVERSION_TYPE_FORMULA: - self['block_len'] = kargs.get( - 'block_len', - v2c.CC_POLY_BLOCK_SIZE, - ) - self['range_flag'] = kargs.get('range_flag', 1) - self['min_phy_value'] = 
kargs.get('min_phy_value', 0) - self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = kargs.get( - 'conversion_type', - v2c.CONVERSION_TYPE_FORMULA, - ) - self['ref_param_nr'] = kargs.get('ref_param_nr', 2) - self['formula'] = kargs.get('formula', b'X1'+b'\0'*254) - - elif kargs['conversion_type'] in ( - v2c.CONVERSION_TYPE_TABI, - v2c.CONVERSION_TYPE_TABX): - nr = kargs['ref_param_nr'] - self['block_len'] = kargs['block_len'] - self['range_flag'] = kargs.get('range_flag', 1) - self['min_phy_value'] = kargs.get('min_phy_value', 0) - self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = kargs.get( - 'conversion_type', - v2c.CONVERSION_TYPE_TABI, - ) - self['ref_param_nr'] = kargs.get('ref_param_nr', 2) - for i in range(nr): - self['raw_{}'.format(i)] = kargs['raw_{}'.format(i)] - self['phys_{}'.format(i)] = kargs['phys_{}'.format(i)] - - elif kargs['conversion_type'] == v2c.CONVERSION_TYPE_VTAB: - nr = kargs['ref_param_nr'] - self['block_len'] = kargs.get( - 'block_len', - v2c.CC_COMMON_BLOCK_SIZE + 40*nr, - ) - self['range_flag'] = kargs.get('range_flag', 0) - self['min_phy_value'] = kargs.get('min_phy_value', 0) - self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = v2c.CONVERSION_TYPE_VTAB - self['ref_param_nr'] = nr - - for i in range(nr): - self['param_val_{}'.format(i)] = kargs['param_val_{}'.format(i)] - self['text_{}'.format(i)] = kargs['text_{}'.format(i)] - - elif kargs['conversion_type'] == v2c.CONVERSION_TYPE_VTABR: - nr = kargs.get('ref_param_nr', 0) - self['block_len'] = kargs.get( - 'block_len', - v2c.CC_COMMON_BLOCK_SIZE + 20*nr, - ) - self['range_flag'] = kargs.get('range_flag', 0) - self['min_phy_value'] = kargs.get('min_phy_value', 0) - self['max_phy_value'] = kargs.get('max_phy_value', 0) - self['unit'] = kargs.get('unit', ('\0'*20).encode('latin-1')) - self['conversion_type'] = v2c.CONVERSION_TYPE_VTABR - self['ref_param_nr'] = kargs.get('ref_param_nr', 0) - - for i in range(self['ref_param_nr']): - self['lower_{}'.format(i)] = kargs['lower_{}'.format(i)] - self['upper_{}'.format(i)] = kargs['upper_{}'.format(i)] - self['text_{}'.format(i)] = kargs['text_{}'.format(i)] - else: - message = 'Conversion type "{}" not implemented' - message = message.format(kargs['conversion_type']) - raise Exception(message) - - def __bytes__(self): - conv = self['conversion_type'] - - # compute the fmt - if conv == v2c.CONVERSION_TYPE_NONE: - fmt = v2c.FMT_CONVERSION_COMMON - elif conv == v2c.CONVERSION_TYPE_FORMULA: - fmt = v2c.FMT_CONVERSION_FORMULA - elif conv == v2c.CONVERSION_TYPE_LINEAR: - fmt = v2c.FMT_CONVERSION_LINEAR - if not self['block_len'] == v2c.CC_LIN_BLOCK_SIZE: - fmt += '{}s'.format(self['block_len'] - v2c.CC_LIN_BLOCK_SIZE) - elif conv in (v2c.CONVERSION_TYPE_POLY, v2c.CONVERSION_TYPE_RAT): - fmt = v2c.FMT_CONVERSION_POLY_RAT - elif conv in (v2c.CONVERSION_TYPE_EXPO, v2c.CONVERSION_TYPE_LOGH): - fmt = v2c.FMT_CONVERSION_EXPO_LOGH - elif conv in (v2c.CONVERSION_TYPE_TABI, v2c.CONVERSION_TYPE_TABX): - nr = self['ref_param_nr'] - fmt = v2c.FMT_CONVERSION_COMMON + '{}d'.format(nr * 2) - elif conv == v2c.CONVERSION_TYPE_VTABR: - nr = self['ref_param_nr'] - fmt = v2c.FMT_CONVERSION_COMMON + '2dI' * nr - elif conv == v2c.CONVERSION_TYPE_VTAB: - nr = self['ref_param_nr'] - fmt = v2c.FMT_CONVERSION_COMMON + 
'd32s' * nr - - # compute the keys only for Python < 3.6 - if PYVERSION_MAJOR < 36: - if conv == v2c.CONVERSION_TYPE_NONE: - keys = v2c.KEYS_CONVESION_NONE - elif conv == v2c.CONVERSION_TYPE_FORMULA: - keys = v2c.KEYS_CONVESION_FORMULA - elif conv == v2c.CONVERSION_TYPE_LINEAR: - keys = v2c.KEYS_CONVESION_LINEAR - if not self['block_len'] == v2c.CC_LIN_BLOCK_SIZE: - keys += ('CANapeHiddenExtra',) - elif conv in (v2c.CONVERSION_TYPE_POLY, v2c.CONVERSION_TYPE_RAT): - keys = v2c.KEYS_CONVESION_POLY_RAT - elif conv in (v2c.CONVERSION_TYPE_EXPO, v2c.CONVERSION_TYPE_LOGH): - keys = v2c.KEYS_CONVESION_EXPO_LOGH - elif conv in (v2c.CONVERSION_TYPE_TABI, v2c.CONVERSION_TYPE_TABX): - nr = self['ref_param_nr'] - keys = list(v2c.KEYS_CONVESION_NONE) - for i in range(nr): - keys.append('raw_{}'.format(i)) - keys.append('phys_{}'.format(i)) - elif conv == v2c.CONVERSION_TYPE_VTABR: - nr = self['ref_param_nr'] - keys = list(v2c.KEYS_CONVESION_NONE) - for i in range(nr): - keys.append('lower_{}'.format(i)) - keys.append('upper_{}'.format(i)) - keys.append('text_{}'.format(i)) - elif conv == v2c.CONVERSION_TYPE_VTAB: - nr = self['ref_param_nr'] - keys = list(v2c.KEYS_CONVESION_NONE) - for i in range(nr): - keys.append('param_val_{}'.format(i)) - keys.append('text_{}'.format(i)) - - if PYVERSION_MAJOR >= 36: - result = pack(fmt, *self.values()) - else: - result = pack(fmt, *[self[key] for key in keys]) - return result - - -class ChannelDependency(dict): - ''' CDBLOCK class derived from *dict* - - Currently the ChannelDependency object can only be created using the - *stream* and *address* keyword parameters when reading from file - - The keys have the following meaning: - - * id - Block type identifier, always "CD" - * block_len - Block size of this block in bytes (entire CDBLOCK) - * dependency_type - Dependency type - * sd_nr - Total number of signals dependencies (m) - * for each dependency there is a group of three keys: - - * dg_{n} - Pointer to the data group block (DGBLOCK) of signal - dependency *n* - * cg_{n} - Pointer to the channel group block (DGBLOCK) of signal - dependency *n* - * ch_{n} - Pointer to the channel block (DGBLOCK) of signal dependency - *n* - - * there can also be optional keys which decribe dimensions for the - N-dimensional dependencies: - - * dim_{n} - Optional: size of dimension *n* for N-dimensional dependency - - Parameters - ---------- - stream : file handle - mdf file handle - address : int - block address inside mdf file - - Attributes - ---------- - address : int - block address inside mdf file - - ''' - __slots__ = ['address', 'referenced_channels'] - def __init__(self, **kargs): - super(ChannelDependency, self).__init__() - - self.referenced_channels = [] - - try: - stream = kargs['stream'] - self.address = address = kargs['address'] - stream.seek(address, SEEK_START) - - (self['id'], - self['block_len'], - self['dependency_type'], - self['sd_nr']) = unpack('<2s3H', stream.read(8)) - - links_size = 3 * 4 * self['sd_nr'] - links = unpack( - '<{}I'.format(3 * self['sd_nr']), - stream.read(links_size), - ) - - for i in range(self['sd_nr']): - self['dg_{}'.format(i)] = links[3*i] - self['cg_{}'.format(i)] = links[3*i+1] - self['ch_{}'.format(i)] = links[3*i+2] - - optional_dims_nr = (self['block_len'] - 8 - links_size) // 2 - if optional_dims_nr: - dims = unpack( - '<{}H'.format(optional_dims_nr), - stream.read(optional_dims_nr * 2), - ) - for i, dim in enumerate(dims): - self['dim_{}'.format(i)] = dim - - except KeyError: - sd_nr = kargs['sd_nr'] - self['id'] = b'CD' - 
self['block_len'] = 8 + 3 * 4 * sd_nr - self['dependency_type'] = 1 - self['sd_nr'] = sd_nr - for i in range(sd_nr): - self['dg_{}'.format(i)] = 0 - self['cg_{}'.format(i)] = 0 - self['ch_{}'.format(i)] = 0 - i = 0 - while True: - try: - self['dim_{}'.format(i)] = kargs['dim_{}'.format(i)] - i += 1 - except KeyError: - break - if i: - self['dependency_type'] = 256 + i - self['block_len'] += 2 * i - - def __bytes__(self): - fmt = '<2s3H{}I'.format(self['sd_nr'] * 3) - keys = ('id', 'block_len', 'dependency_type', 'sd_nr') - for i in range(self['sd_nr']): - keys += ('dg_{}'.format(i), 'cg_{}'.format(i), 'ch_{}'.format(i)) - links_size = 3 * 4 * self['sd_nr'] - option_dims_nr = (self['block_len'] - 8 - links_size) // 2 - if option_dims_nr: - fmt += '{}H'.format(option_dims_nr) - keys += tuple('dim_{}'.format(i) for i in range(option_dims_nr)) - if PYVERSION_MAJOR >= 36: - result = pack(fmt, *self.values()) - else: - result = pack(fmt, *[self[key] for key in keys]) - return result - - -class ChannelExtension(dict): - ''' CEBLOCK class derived from *dict* - - The ChannelExtension object can be created in two modes: - - * using the *stream* and *address* keyword parameters - when reading from - file - * using any of the following presented keys - when creating a new - ChannelExtension - - The first keys are common for all conversion types, and are followed by - conversion specific keys. The keys have the following meaning: - - * common keys - - * id - Block type identifier, always "CE" - * block_len - Block size of this block in bytes (entire CEBLOCK) - * type - Extension type identifier - - * specific keys - - * for DIM block - - * module_nr - Number of module - * module_address - Address - * description - Description - * ECU_identification - Identification of ECU - * reserved0' - reserved - - * for Vector CAN block - - * CAN_id - Identifier of CAN message - * CAN_ch_index - Index of CAN channel - * message_name - Name of message (string should be terminated by 0) - * sender_name - Name of sender (string should be terminated by 0) - * reserved0 - reserved - - Parameters - ---------- - stream : file handle - mdf file handle - address : int - block address inside mdf file - - Attributes - ---------- - address : int - block address inside mdf file - - ''' - __slots__ = ['address',] - def __init__(self, **kargs): - super(ChannelExtension, self).__init__() - - try: - stream = kargs['stream'] - self.address = address = kargs['address'] - stream.seek(address, SEEK_START) - (self['id'], - self['block_len'], - self['type']) = unpack(v2c.FMT_SOURCE_COMMON, stream.read(6)) - block = stream.read(self['block_len'] - 6) - - if self['type'] == v2c.SOURCE_ECU: - (self['module_nr'], - self['module_address'], - self['description'], - self['ECU_identification'], - self['reserved0']) = unpack(v2c.FMT_SOURCE_EXTRA_ECU, block) - elif self['type'] == v2c.SOURCE_VECTOR: - (self['CAN_id'], - self['CAN_ch_index'], - self['message_name'], - self['sender_name'], - self['reserved0']) = unpack(v2c.FMT_SOURCE_EXTRA_VECTOR, block) - except KeyError: - - self.address = 0 - self['id'] = kargs.get('id', 'CE'.encode('latin-1')) - self['block_len'] = kargs.get('block_len', v2c.CE_BLOCK_SIZE) - self['type'] = kargs.get('type', 2) - if self['type'] == v2c.SOURCE_ECU: - self['module_nr'] = kargs.get('module_nr', 0) - self['module_address'] = kargs.get('module_address', 0) - self['description'] = kargs.get('description', b'\0') - self['ECU_identification'] = kargs.get( - 'ECU_identification', - b'\0', - ) - self['reserved0'] = 
kargs.get('reserved0', b'\0') - elif self['type'] == v2c.SOURCE_VECTOR: - self['CAN_id'] = kargs.get('CAN_id', 0) - self['CAN_ch_index'] = kargs.get('CAN_ch_index', 0) - self['message_name'] = kargs.get('message_name', b'\0') - self['sender_name'] = kargs.get('sender_name', b'\0') - self['reserved0'] = kargs.get('reserved0', b'\0') - - def __bytes__(self): - typ = self['type'] - if typ == v2c.SOURCE_ECU: - fmt = v2c.FMT_SOURCE_ECU - keys = v2c.KEYS_SOURCE_ECU - else: - fmt = v2c.FMT_SOURCE_VECTOR - keys = v2c.KEYS_SOURCE_VECTOR - - if PYVERSION_MAJOR >= 36: - result = pack(fmt, *self.values()) - else: - result = pack(fmt, *[self[key] for key in keys]) - return result - - -class ChannelGroup(dict): - ''' CGBLOCK class derived from *dict* - - The ChannelGroup object can be created in two modes: - - * using the *stream* and *address* keyword parameters - when reading from - file - * using any of the following presented keys - when creating a new - ChannelGroup - - The keys have the following meaning: - - * id - Block type identifier, always "CG" - * block_len - Block size of this block in bytes (entire CGBLOCK) - * next_cg_addr - Pointer to next channel group block (CGBLOCK) (NIL allowed) - * first_ch_addr - Pointer to first channel block (CNBLOCK) (NIL allowed) - * comment_addr - Pointer to channel group comment text (TXBLOCK) - (NIL allowed) - * record_id - Record ID, i.e. value of the identifier for a record if the - DGBLOCK defines a number of record IDs > 0 - * ch_nr - Number of channels (redundant information) - * samples_byte_nr - Size of data record in Bytes (without record ID), i.e. - size of plain data for a each recorded sample of this channel group - * cycles_nr - Number of records of this type in the data block i.e. number - of samples for this channel group - * sample_reduction_addr - only since version 3.3. Pointer to first sample - reduction block (SRBLOCK) (NIL allowed) Default value: NIL. - - Parameters - ---------- - stream : file handle - mdf file handle - address : int - block address inside mdf file - - Attributes - ---------- - address : int - block address inside mdf file - - Examples - -------- - >>> with open('test.mdf', 'rb') as mdf: - ... 
cg1 = ChannelGroup(stream=mdf, address=0xBA52) - >>> cg2 = ChannelGroup(sample_bytes_nr=32) - >>> hex(cg1.address) - 0xBA52 - >>> cg1['id'] - b'CG' - - ''' - __slots__ = ['address',] - def __init__(self, **kargs): - super(ChannelGroup, self).__init__() - - try: - - stream = kargs['stream'] - self.address = address = kargs['address'] - stream.seek(address, SEEK_START) - block = stream.read(v2c.CG_BLOCK_SIZE) - - (self['id'], - self['block_len'], - self['next_cg_addr'], - self['first_ch_addr'], - self['comment_addr'], - self['record_id'], - self['ch_nr'], - self['samples_byte_nr'], - self['cycles_nr']) = unpack(v2c.FMT_CHANNEL_GROUP, block) - if self['block_len'] == v2c.CG33_BLOCK_SIZE: - self['sample_reduction_addr'] = unpack('= 36: - result = pack(fmt, *self.values()) - else: - result = pack(fmt, *[self[key] for key in keys]) - return result - -class DataBlock(dict): - """Data Block class derived from *dict* - - The DataBlock object can be created in two modes: - - * using the *stream*, *address* and *size* keyword parameters - when reading - from file - * using any of the following presented keys - when creating a new - ChannelGroup - - The keys have the following meaning: - - * data - bytes block - - Attributes - ---------- - address : int - block address - - Parameters - ---------- - address : int - block address inside the measurement file - stream : file.io.handle - binary file stream - - """ - __slots__ = ['address',] - def __init__(self, **kargs): - super(DataBlock, self).__init__() - - try: - stream = kargs['stream'] - size = kargs['size'] - self.address = address = kargs['address'] - stream.seek(address, SEEK_START) - - self['data'] = stream.read(size) - - except KeyError: - self.address = 0 - self['data'] = kargs.get('data', b'') - - def __bytes__(self): - return self['data'] - - -class DataGroup(dict): - ''' DGBLOCK class derived from *dict* - - The DataGroup object can be created in two modes: - - * using the *stream* and *address* keyword parameters - when reading from - file - * using any of the following presented keys - when creating a new DataGroup - - The keys have the following meaning: - - * id - Block type identifier, always "DG" - * block_len - Block size of this block in bytes (entire DGBLOCK) - * next_dg_addr - Pointer to next data group block (DGBLOCK) (NIL allowed) - * first_cg_addr - Pointer to first channel group block (CGBLOCK) - (NIL allowed) - * trigger_addr - Pointer to trigger block (TRBLOCK) (NIL allowed) - * data_block_addr - Pointer to the data block - * cg_nr - Number of channel groups (redundant information) - * record_id_nr - Number of record IDs in the data block - * reserved0 - since version 3.2; Reserved - - Parameters - ---------- - stream : file handle - mdf file handle - address : int - block address inside mdf file - - Attributes - ---------- - address : int - block address inside mdf file - - ''' - __slots__ = ['address',] - def __init__(self, **kargs): - super(DataGroup, self).__init__() - - try: - stream = kargs['stream'] - self.address = address = kargs['address'] - stream.seek(address, SEEK_START) - block = stream.read(v2c.DG31_BLOCK_SIZE) - - (self['id'], - self['block_len'], - self['next_dg_addr'], - self['first_cg_addr'], - self['trigger_addr'], - self['data_block_addr'], - self['cg_nr'], - self['record_id_nr']) = unpack(v2c.FMT_DATA_GROUP, block) - - if self['block_len'] == v2c.DG32_BLOCK_SIZE: - self['reserved0'] = stream.read(4) - - except KeyError: - self.address = 0 - self['id'] = kargs.get('id', 'DG'.encode('latin-1')) - 
self['block_len'] = kargs.get('block_len', v2c. DG32_BLOCK_SIZE) - self['next_dg_addr'] = kargs.get('next_dg_addr', 0) - self['first_cg_addr'] = kargs.get('first_cg_addr', 0) - self['trigger_addr'] = kargs.get('comment_addr', 0) - self['data_block_addr'] = kargs.get('data_block_addr', 0) - self['cg_nr'] = kargs.get('cg_nr', 1) - self['record_id_nr'] = kargs.get('record_id_nr', 0) - if self['block_len'] == v2c.DG32_BLOCK_SIZE: - self['reserved0'] = b'\0\0\0\0' - - def __bytes__(self): - if self['block_len'] == v2c.DG32_BLOCK_SIZE: - fmt = v2c.FMT_DATA_GROUP_32 - keys = v2c.KEYS_DATA_GROUP_32 - else: - fmt = v2c.FMT_DATA_GROUP - keys = v2c.KEYS_DATA_GROUP - if PYVERSION_MAJOR >= 36: - result = pack(fmt, *self.values()) - else: - result = pack(fmt, *[self[key] for key in keys]) - return result - - -class FileIdentificationBlock(dict): - ''' IDBLOCK class derived from *dict* - - The TriggerBlock object can be created in two modes: - - * using the *stream* and *address* keyword parameters - when reading from - file - * using the classmethod *from_text* - - The keys have the following meaning: - - * file_identification - file identifier - * version_str - format identifier - * program_identification - program identifier - * byte_order - default byte order - * float_format - default floating-point format - * mdf_version - version number of MDF format - * code_page - code page number - * reserved0 - reserved - * reserved1 - reserved - * unfinalized_standard_flags - Standard Flags for unfinalized MDF - * unfinalized_custom_flags - Custom Flags for unfinalized MDF - - Parameters - ---------- - stream : file handle - mdf file handle - version : int - mdf version in case of new file - - Attributes - ---------- - address : int - block address inside mdf file; should be 0 always - - ''' - __slots__ = ['address',] - def __init__(self, **kargs): - super(FileIdentificationBlock, self).__init__() - - self.address = 0 - try: - - stream = kargs['stream'] - stream.seek(0, SEEK_START) - - (self['file_identification'], - self['version_str'], - self['program_identification'], - self['byte_order'], - self['float_format'], - self['mdf_version'], - self['code_page'], - self['reserved0'], - self['reserved1'], - self['unfinalized_standard_flags'], - self['unfinalized_custom_flags']) = unpack( - v2c.ID_FMT, - stream.read(v2c.ID_BLOCK_SIZE), - ) - except KeyError: - version = kargs['version'] - self['file_identification'] = 'MDF '.encode('latin-1') - self['version_str'] = version.encode('latin-1') + b'\0' * 4 - self['program_identification'] = 'Python '.encode('latin-1') - self['byte_order'] = v2c.BYTE_ORDER_INTEL - self['float_format'] = 0 - self['mdf_version'] = int(version.replace('.', '')) - self['code_page'] = 0 - self['reserved0'] = b'\0' * 2 - self['reserved1'] = b'\0' * 26 - self['unfinalized_standard_flags'] = 0 - self['unfinalized_custom_flags'] = 0 - - def __bytes__(self): - if PYVERSION_MAJOR >= 36: - result = pack(v2c.ID_FMT, *self.values()) - else: - result = pack( - v2c.ID_FMT, - *[self[key] for key in v2c.ID_KEYS] - ) - return result - - -class HeaderBlock(dict): - ''' HDBLOCK class derived from *dict* - - The TriggerBlock object can be created in two modes: - - * using the *stream* - when reading from file - * using the classmethod *from_text* - - The keys have the following meaning: - - * id - Block type identifier, always "HD" - * block_len - Block size of this block in bytes (entire HDBLOCK) - * first_dg_addr - Pointer to the first data group block (DGBLOCK) - * comment_addr - Pointer to the 
measurement file comment text (TXBLOCK) (NIL - allowed) - * program_addr - Pointer to program block (PRBLOCK) (NIL allowed) - * dg_nr - Number of data groups (redundant information) - * date - Date at which the recording was started in "DD:MM:YYYY" format - * time - Time at which the recording was started in "HH:MM:SS" format - * author - author name - * organization - organization - * project - project name - * subject - subject - - Since version 3.2 the following extra keys were added: - - * abs_time - Time stamp at which recording was started in nanoseconds. - * tz_offset - UTC time offset in hours (= GMT time zone) - * time_quality - Time quality class - * timer_identification - Timer identification (time source), - - Parameters - ---------- - stream : file handle - mdf file handle - - Attributes - ---------- - address : int - block address inside mdf file; should be 64 always - - ''' - __slots__ = ['address',] - def __init__(self, **kargs): - super(HeaderBlock, self).__init__() - - self.address = 64 - try: - - stream = kargs['stream'] - stream.seek(64, SEEK_START) - - (self['id'], - self['block_len'], - self['first_dg_addr'], - self['comment_addr'], - self['program_addr'], - self['dg_nr'], - self['date'], - self['time'], - self['author'], - self['organization'], - self['project'], - self['subject']) = unpack( - v2c.HEADER_COMMON_FMT, - stream.read(v2c.HEADER_COMMON_SIZE), - ) - - if self['block_len'] > v2c.HEADER_COMMON_SIZE: - (self['abs_time'], - self['tz_offset'], - self['time_quality'], - self['timer_identification']) = unpack( - v2c.HEADER_POST_320_EXTRA_FMT, - stream.read(v2c.HEADER_POST_320_EXTRA_SIZE), - ) - - except KeyError: - self['id'] = 'HD'.encode('latin-1') - self['block_len'] = 164 - self['first_dg_addr'] = 0 - self['comment_addr'] = 0 - self['program_addr'] = 0 - self['dg_nr'] = 0 - t1 = time.time() * 10**9 - t2 = time.gmtime() - self['date'] = '{:\0<10}'.format(time.strftime('%d:%m:%Y', t2)).encode('latin-1') - self['time'] = '{:\0<8}'.format(time.strftime('%X', t2)).encode('latin-1') - self['author'] = '{:\0<32}'.format(getuser()).encode('latin-1') - self['organization'] = '{:\0<32}'.format('').encode('latin-1') - self['project'] = '{:\0<32}'.format('').encode('latin-1') - self['subject'] = '{:\0<32}'.format('').encode('latin-1') - - if self['block_len'] > v2c.HEADER_COMMON_SIZE: - self['abs_time'] = int(t1) - self['tz_offset'] = 2 - self['time_quality'] = 0 - self['timer_identification'] = '{:\0<32}'.format('Local PC Reference Time').encode('latin-1') - - def __bytes__(self): - fmt = v2c.HEADER_COMMON_FMT - keys = v2c.HEADER_COMMON_KEYS - if self['block_len'] > v2c.HEADER_COMMON_SIZE: - fmt += v2c.HEADER_POST_320_EXTRA_FMT - keys += v2c.HEADER_POST_320_EXTRA_KEYS - if PYVERSION_MAJOR >= 36: - result = pack(fmt, *self.values()) - else: - result = pack(fmt, *[self[key] for key in keys]) - return result - - -class ProgramBlock(dict): - ''' PRBLOCK class derived from *dict* - - The ProgramBlock object can be created in two modes: - - * using the *stream* and *address* keyword parameters - when reading from - file - * using any of the following presented keys - when creating a new - ProgramBlock - - The keys have the following meaning: - - * id - Block type identifier, always "PR" - * block_len - Block size of this block in bytes (entire PRBLOCK) - * data - Program-specific data - - Parameters - ---------- - stream : file handle - mdf file handle - address : int - block address inside mdf file - - Attributes - ---------- - address : int - block address inside mdf file - 
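# Illustrative sketch, not part of this patch: the PRBLOCK layout documented
# above follows the common header pattern these MDF v3 block readers use --
# a 2-byte ASCII id plus a little-endian uint16 total block length, followed
# by the payload. `stream` is assumed to be a binary file object.
from struct import unpack

def read_program_block(stream, address):
    stream.seek(address)
    block_id, block_len = unpack('<2sH', stream.read(4))
    data = stream.read(block_len - 4)   # program-specific data
    return block_id, data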
- ''' - __slots__ = ['address',] - def __init__(self, **kargs): - super(ProgramBlock, self).__init__() - - try: - stream = kargs['stream'] - self.address = address = kargs['address'] - stream.seek(address, SEEK_START) - - (self['id'], - self['block_len']) = unpack('<2sH', stream.read(4)) - self['data'] = stream.read(self['block_len'] - 4) - - except KeyError: - pass - - def __bytes__(self): - fmt = v2c.FMT_PROGRAM_BLOCK.format(self['block_len']) - if PYVERSION_MAJOR >= 36: - result = pack(fmt, *self.values()) - else: - result = pack(fmt, *[self[key] for key in v2c.KEYS_PROGRAM_BLOCK]) - return result - - -class SampleReduction(dict): - ''' SRBLOCK class derived from *dict* - - Currently the SampleReduction object can only be created by using the - *stream* and *address* keyword parameters - when reading from file - - The keys have the following meaning: - - * id - Block type identifier, always "SR" - * block_len - Block size of this block in bytes (entire SRBLOCK) - * next_sr_addr - Pointer to next sample reduction block (SRBLOCK) (NIL - allowed) - * data_block_addr - Pointer to the data block for this sample reduction - * cycles_nr - Number of reduced samples in the data block. - * time_interval - Length of time interval [s] used to calculate the reduced - samples. - - Parameters - ---------- - stream : file handle - mdf file handle - address : int - block address inside mdf file - - Attributes - ---------- - address : int - block address inside mdf file - - ''' - __slots__ = ['address',] - def __init__(self, **kargs): - super(SampleReduction, self).__init__() - - try: - stream = kargs['stream'] - self.address = address = kargs['address'] - stream.seek(address, SEEK_START) - - (self['id'], - self['block_len'], - self['next_sr_addr'], - self['data_block_addr'], - self['cycles_nr'], - self['time_interval']) = unpack( - v2c.FMT_SAMPLE_REDUCTION_BLOCK, - stream.read(v2c.SR_BLOCK_SIZE), - ) - - except KeyError: - pass - - def __bytes__(self): - result = pack( - v2c.FMT_SAMPLE_REDUCTION_BLOCK, - *[self[key] for key in v2c.KEYS_SAMPLE_REDUCTION_BLOCK] - ) - return result - - -class TextBlock(dict): - ''' TXBLOCK class derived from *dict* - - The ProgramBlock object can be created in two modes: - - * using the *stream* and *address* keyword parameters - when reading from - file - * using the classmethod *from_text* - - The keys have the following meaning: - - * id - Block type identifier, always "TX" - * block_len - Block size of this block in bytes (entire TXBLOCK) - * text - Text (new line indicated by CR and LF; end of text indicated by 0) - - Parameters - ---------- - stream : file handle - mdf file handle - address : int - block address inside mdf file - text : bytes - bytes for creating a new TextBlock - - Attributes - ---------- - address : int - block address inside mdf file - text_str : str - text data as unicode string - - Examples - -------- - >>> tx1 = TextBlock.from_text('VehicleSpeed') - >>> tx1.text_str - 'VehicleSpeed' - >>> tx1['text'] - b'VehicleSpeed' - - ''' - __slots__ = ['address',] - def __init__(self, **kargs): - super(TextBlock, self).__init__() - try: - stream = kargs['stream'] - self.address = address = kargs['address'] - - stream.seek(address, SEEK_START) - (self['id'], - self['block_len']) = unpack('<2sH', stream.read(4)) - size = self['block_len'] - 4 - self['text'] = stream.read(size) - - except KeyError: - self.address = 0 - text = kargs['text'] - - if PYVERSION == 3: - try: - text = text.encode('utf-8') - except AttributeError: - pass - else: - try: - text = 
text.encode('utf-8') - except (AttributeError, UnicodeDecodeError): - pass - - self['id'] = b'TX' - self['block_len'] = len(text) + 4 + 1 - self['text'] = text + b'\0' - - def __bytes__(self): - if PYVERSION_MAJOR >= 36: - result = pack( - '<2sH{}s'.format(self['block_len']-4), - *self.values() - ) - else: - result = pack( - '<2sH{}s'.format(self['block_len']-4), - *[self[key] for key in v2c.KEYS_TEXT_BLOCK] - ) - return result - - -class TriggerBlock(dict): - ''' TRBLOCK class derived from *dict* - - The TriggerBlock object can be created in two modes: - - * using the *stream* and *address* keyword parameters - when reading from - file - * using the classmethod *from_text* - - The keys have the following meaning: - - * id - Block type identifier, always "TX" - * block_len - Block size of this block in bytes (entire TRBLOCK) - * text_addr - Pointer to trigger comment text (TXBLOCK) (NIL allowed) - * trigger_events_nr - Number of trigger events n (0 allowed) - * trigger_{n}_time - Trigger time [s] of trigger event *n* - * trigger_{n}_pretime - Pre trigger time [s] of trigger event *n* - * trigger_{n}_posttime - Post trigger time [s] of trigger event *n* - - Parameters - ---------- - stream : file handle - mdf file handle - address : int - block address inside mdf file - - Attributes - ---------- - address : int - block address inside mdf file - - ''' - __slots__ = ['address',] - def __init__(self, **kargs): - super(TriggerBlock, self).__init__() - - try: - self.address = address = kargs['address'] - stream = kargs['stream'] - - stream.seek(address + 2, SEEK_START) - size = unpack('= 36: - result = pack(fmt, *self.values()) - else: - result = pack(fmt, *[self[key] for key in keys]) - return result diff --git a/asammdf/v2constants.py b/asammdf/v2constants.py deleted file mode 100644 index cd9f2fef0..000000000 --- a/asammdf/v2constants.py +++ /dev/null @@ -1,345 +0,0 @@ -# -*- coding: utf-8 -*- -""" MDF v2 constants """ - -# byte order -BYTE_ORDER_INTEL = 0 -BYTE_ORDER_MOTOROLA = 1 - -# data types -DATA_TYPE_UNSIGNED = 0 -DATA_TYPE_SIGNED = 1 -DATA_TYPE_FLOAT = 2 -DATA_TYPE_DOUBLE = 3 -DATA_TYPE_STRING = 7 -DATA_TYPE_BYTEARRAY = 8 -DATA_TYPE_UNSIGNED_INTEL = 13 -DATA_TYPE_UNSIGNED_MOTOROLA = 9 -DATA_TYPE_SIGNED_INTEL = 14 -DATA_TYPE_SIGNED_MOTOROLA = 10 -DATA_TYPE_FLOAT_INTEL = 15 -DATA_TYPE_FLOAT_MOTOROLA = 11 -DATA_TYPE_DOUBLE_INTEL = 16 -DATA_TYPE_DOUBLE_MOTOROLA = 12 - -SIGNED_INT = { - DATA_TYPE_SIGNED, - DATA_TYPE_SIGNED_INTEL, - DATA_TYPE_SIGNED_MOTOROLA, -} -STANDARD_INT_SIZES = {8, 16, 32, 64} - -INT_TYPES = { - DATA_TYPE_UNSIGNED, - DATA_TYPE_SIGNED, - DATA_TYPE_UNSIGNED_INTEL, - DATA_TYPE_UNSIGNED_MOTOROLA, - DATA_TYPE_SIGNED_INTEL, - DATA_TYPE_SIGNED_MOTOROLA, -} - -# channel types -CHANNEL_TYPE_VALUE = 0 -CHANNEL_TYPE_MASTER = 1 - -# channel conversion types -CONVERSION_TYPE_NONE = 65535 -CONVERSION_TYPE_LINEAR = 0 -CONVERSION_TYPE_TABI = 1 -CONVERSION_TYPE_TABX = 2 -CONVERSION_TYPE_POLY = 6 -CONVERSION_TYPE_EXPO = 7 -CONVERSION_TYPE_LOGH = 8 -CONVERSION_TYPE_RAT = 9 -CONVERSION_TYPE_FORMULA = 10 -CONVERSION_TYPE_VTAB = 11 -CONVERSION_TYPE_VTABR = 12 - -RAT_CONV_TEXT = '(P1 * X**2 + P2 * X + P3) / (P4 * X**2 + P5 * X + P6)' -POLY_CONV_SHORT_TEXT = 'P4 * X / P1' -POLY_CONV_LONG_TEXT = '(P2 - (P4 * (X - P5 -P6))) / (P3* (X - P5 - P6) - P1)' - -DEPENDENCY_TYPE_NONE = 0 -DEPENDENCY_TYPE_VECTOR = 1 -DEPENDENCY_TYPE_NDIM = 256 - -# flags -FLAG_PRECISION = 1 -FLAG_PHY_RANGE_OK = 2 -FLAG_VAL_RANGE_OK = 8 - -# channel source types -SOURCE_ECU = 2 -SOURCE_VECTOR = 19 - -# bus types 
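# Illustrative sketch, not part of this patch: evaluating the rational
# conversion whose formula string RAT_CONV_TEXT defines above. P1..P6 are
# hypothetical example coefficients as they would be read from a CCBLOCK.
def rational_conversion(X, P1, P2, P3, P4, P5, P6):
    # (P1 * X**2 + P2 * X + P3) / (P4 * X**2 + P5 * X + P6)
    return (P1 * X ** 2 + P2 * X + P3) / (P4 * X ** 2 + P5 * X + P6)

print(rational_conversion(2.0, 0, 1, 0, 0, 0, 2))   # reduces to X / 2 -> 1.0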
-BUS_TYPE_NONE = 0 -BUS_TYPE_CAN = 2 -BUS_TYPE_FLEXRAY = 5 - -# file IO seek types -SEEK_START = 0 -SEEK_REL = 1 -SEEK_END = 2 - -# blocks size -ID_BLOCK_SIZE = 64 -HEADER_COMMON_SIZE = 164 -HEADER_POST_320_EXTRA_SIZE = 44 -CE_BLOCK_SIZE = 128 -FH_BLOCK_SIZE = 56 -DG31_BLOCK_SIZE = 24 -DG32_BLOCK_SIZE = 28 -HD_BLOCK_SIZE = 104 -CN20_BLOCK_SIZE = 218 -CN21_BLOCK_SIZE = 222 -CG_BLOCK_SIZE = 26 -CG33_BLOCK_SIZE = 30 -DT_BLOCK_SIZE = 24 -CC_COMMON_BLOCK_SIZE = 46 -CC_COMMON_SHORT_SIZE = 42 -CC_ALG_BLOCK_SIZE = 88 -CC_LIN_BLOCK_SIZE = 62 -CC_POLY_BLOCK_SIZE = 94 -CC_EXPO_BLOCK_SIZE = 102 -CC_FORMULA_BLOCK_SIZE = 304 -SR_BLOCK_SIZE = 156 - -# max int values -MAX_UINT8 = 2 << 8 - 1 -MAX_UINT16 = 2 << 16 - 1 -MAX_UNIT32 = 2 << 32 - 1 -MAX_UINT64 = 2 << 64 - 1 - -# data location -LOCATION_ORIGINAL_FILE = 0 -LOCATION_TEMPORARY_FILE = 1 -LOCATION_MEMORY = 2 - -# blocks struct fmts and keys -ID_FMT = '<8s8s8s4H2s26s2H' -ID_KEYS = ( - 'file_identification', - 'version_str', - 'program_identification', - 'byte_order', - 'float_format', - 'mdf_version', - 'code_page', - 'reserved0', - 'reserved1', - 'unfinalized_standard_flags', - 'unfinalized_custom_flags', -) - -HEADER_COMMON_FMT = '<2sH3IH10s8s32s32s32s32s' -HEADER_COMMON_KEYS = ( - 'id', - 'block_len', - 'first_dg_addr', - 'comment_addr', - 'program_addr', - 'dg_nr', - 'date', - 'time', - 'author', - 'organization', - 'project', - 'subject', -) - -HEADER_POST_320_EXTRA_FMT = 'Q2H32s' -HEADER_POST_320_EXTRA_KEYS = ( - 'abs_time', - 'tz_offset', - 'time_quality', - 'timer_identification', -) - -FMT_CHANNEL_20 = '<2sH5IH32s128s4H3d' -FMT_CHANNEL_21 = FMT_CHANNEL_20 + 'I' -KEYS_CHANNEL_20 = ( - 'id', - 'block_len', - 'next_ch_addr', - 'conversion_addr', - 'source_depend_addr', - 'ch_depend_addr', - 'comment_addr', - 'channel_type', - 'short_name', - 'description', - 'start_offset', - 'bit_count', - 'data_type', - 'range_flag', - 'min_raw_value', - 'max_raw_value', - 'sampling_rate', -) -KEYS_CHANNEL_21 = KEYS_CHANNEL_20 + ('long_name_addr', ) - -FMT_CHANNEL_GROUP = '<2sH3I3HI' -KEYS_CHANNEL_GROUP = ( - 'id', - 'block_len', - 'next_cg_addr', - 'first_ch_addr', - 'comment_addr', - 'record_id', - 'ch_nr', - 'samples_byte_nr', - 'cycles_nr', -) - -FMT_DATA_GROUP_32 = '<2sH4I2H4s' -KEYS_DATA_GROUP_32 = ( - 'id', - 'block_len', - 'next_dg_addr', - 'first_cg_addr', - 'trigger_addr', - 'data_block_addr', - 'cg_nr', - 'record_id_nr', - 'reserved0', -) - -FMT_DATA_GROUP = '<2sH4I2H' -KEYS_DATA_GROUP = ( - 'id', - 'block_len', - 'next_dg_addr', - 'first_cg_addr', - 'trigger_addr', - 'data_block_addr', - 'cg_nr', - 'record_id_nr', -) - -FMT_SOURCE_COMMON = '<2s2H' -FMT_SOURCE_ECU = '<2s3HI80s32s4s' -FMT_SOURCE_EXTRA_ECU = '= 36: @@ -1214,7 +1247,8 @@ class DataBlock(dict): file handle """ - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(DataBlock, self).__init__() @@ -1232,6 +1266,10 @@ def __init__(self, **kargs): ) self['data'] = stream.read(self['block_len'] - v4c.COMMON_SIZE) + if self['id'] != b'##DT': + message = 'Expected "##DT" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: self['id'] = b'##DT' @@ -1240,6 +1278,9 @@ def __init__(self, **kargs): self['links_nr'] = 0 self['data'] = kargs['data'] + if PYVERSION_MAJOR < 30 and isinstance(self['data'], bytearray): + self['data'] = str(self['data']) + def __bytes__(self): fmt = v4c.FMT_DATA_BLOCK.format(self['block_len'] - v4c.COMMON_SIZE) if PYVERSION_MAJOR >= 36: @@ -1249,7 +1290,6 @@ def __bytes__(self): return result - class 
DataZippedBlock(dict): """DZBLOCK class @@ -1262,6 +1302,7 @@ class DataZippedBlock(dict): """ __slots__ = ['address', 'prevent_data_setitem', 'return_unzipped'] + def __init__(self, **kargs): super(DataZippedBlock, self).__init__() @@ -1287,6 +1328,10 @@ def __init__(self, **kargs): self['data'] = stream.read(self['zip_size']) + if self['id'] != b'##DZ': + message = 'Expected "##DZ" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: self.prevent_data_setitem = False self.address = 0 @@ -1323,9 +1368,9 @@ def __setitem__(self, item, value): cols = self['param'] lines = self['original_size'] // cols - nd = np.fromstring(data[:lines*cols], dtype=np.uint8) + nd = np.fromstring(data[:lines * cols], dtype=np.uint8) nd = nd.reshape((lines, cols)) - data = nd.transpose().tostring() + data[lines*cols:] + data = nd.transpose().tostring() + data[lines * cols:] data = compress(data) @@ -1344,9 +1389,9 @@ def __getitem__(self, item): cols = self['param'] lines = self['original_size'] // cols - nd = np.fromstring(data[:lines*cols], dtype=np.uint8) + nd = np.fromstring(data[:lines * cols], dtype=np.uint8) nd = nd.reshape((cols, lines)) - data = nd.transpose().tostring() + data[lines*cols:] + data = nd.transpose().tostring() + data[lines * cols:] else: data = super(DataZippedBlock, self).__getitem__(item) value = data @@ -1367,7 +1412,8 @@ def __bytes__(self): class DataGroup(dict): """DGBLOCK class""" - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(DataGroup, self).__init__() @@ -1390,10 +1436,14 @@ def __init__(self, **kargs): stream.read(v4c.DG_BLOCK_SIZE), ) + if self['id'] != b'##DG': + message = 'Expected "##DG" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: self.address = 0 - self['id'] = kargs.get('id', '##DG'.encode('utf-8')) + self['id'] = b'##DG' self['reserved0'] = kargs.get('reserved0', 0) self['block_len'] = kargs.get('block_len', v4c.DG_BLOCK_SIZE) self['links_nr'] = kargs.get('links_nr', 4) @@ -1402,7 +1452,7 @@ def __init__(self, **kargs): self['data_block_addr'] = kargs.get('data_block_addr', 0) self['comment_addr'] = kargs.get('comment_addr', 0) self['record_id_len'] = kargs.get('record_id_len', 0) - self['reserved1'] = kargs.get('reserved1', b'\00'*7) + self['reserved1'] = kargs.get('reserved1', b'\00' * 7) def __bytes__(self): if PYVERSION_MAJOR >= 36: @@ -1417,7 +1467,8 @@ def __bytes__(self): class DataList(dict): """DLBLOCK class""" - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(DataList, self).__init__() @@ -1438,7 +1489,7 @@ def __init__(self, **kargs): links = unpack( '<{}Q'.format(self['links_nr'] - 1), - stream.read( (self['links_nr'] - 1) * 8 ), + stream.read((self['links_nr'] - 1) * 8), ) for i, addr in enumerate(links): @@ -1456,11 +1507,15 @@ def __init__(self, **kargs): self['data_block_nr']) = unpack('<3sI', stream.read(7)) offsets = unpack( '<{}Q'.format(self['links_nr'] - 1), - stream.read((self['links_nr'] - 1)*8), + stream.read((self['links_nr'] - 1) * 8), ) for i, offset in enumerate(offsets): self['offset_{}'.format(i)] = offset + if self['id'] != b'##DL': + message = 'Expected "##DL" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: self.address = 0 @@ -1485,7 +1540,6 @@ def __init__(self, **kargs): for i, offset in enumerate(self['links_nr'] - 1): self['offset_{}'.format(i)] = kargs['offset_{}'.format(i)] - def __bytes__(self): fmt = 
v4c.FMT_DATA_LIST.format(self['links_nr']) if PYVERSION_MAJOR < 36: @@ -1515,7 +1569,8 @@ def __bytes__(self): class FileIdentificationBlock(dict): """IDBLOCK class""" - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(FileIdentificationBlock, self).__init__() @@ -1570,7 +1625,8 @@ def __bytes__(self): class FileHistory(dict): """FHBLOCK class""" - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(FileHistory, self).__init__() @@ -1594,18 +1650,22 @@ def __init__(self, **kargs): stream.read(v4c.FH_BLOCK_SIZE), ) + if self['id'] != b'##FH': + message = 'Expected "##FH" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: - self['id'] = kargs.get('id', '##FH'.encode('utf-8')) + self['id'] = b'##FH' self['reserved0'] = kargs.get('reserved0', 0) self['block_len'] = kargs.get('block_len', v4c.FH_BLOCK_SIZE) self['links_nr'] = kargs.get('links_nr', 2) self['next_fh_addr'] = kargs.get('next_fh_addr', 0) self['comment_addr'] = kargs.get('comment_addr', 0) - self['abs_time'] = kargs.get('abs_time', int(time.time()) * 10**9) + self['abs_time'] = kargs.get('abs_time', int(time.time()) * 10 ** 9) self['tz_offset'] = kargs.get('tz_offset', 120) self['daylight_save_time'] = kargs.get('daylight_save_time', 60) self['time_flags'] = kargs.get('time_flags', 2) - self['reserved1'] = kargs.get('reserved1', b'\x00'*3) + self['reserved1'] = kargs.get('reserved1', b'\x00' * 3) def __bytes__(self): if PYVERSION_MAJOR >= 36: @@ -1620,7 +1680,8 @@ def __bytes__(self): class HeaderBlock(dict): """HDBLOCK class""" - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(HeaderBlock, self).__init__() @@ -1652,9 +1713,13 @@ def __init__(self, **kargs): stream.read(v4c.HEADER_BLOCK_SIZE), ) + if self['id'] != b'##HD': + message = 'Expected "##HD" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: - self['id'] = '##HD'.encode('utf-8') + self['id'] = b'##HD' self['reserved3'] = kargs.get('reserved3', 0) self['block_len'] = kargs.get('block_len', v4c.HEADER_BLOCK_SIZE) self['links_nr'] = kargs.get('links_nr', 6) @@ -1667,7 +1732,7 @@ def __init__(self, **kargs): ) self['first_event_addr'] = kargs.get('first_event_addr', 0) self['comment_addr'] = kargs.get('comment_addr', 0) - self['abs_time'] = kargs.get('abs_time', int(time.time()) * 10**9) + self['abs_time'] = kargs.get('abs_time', int(time.time()) * 10 ** 9) self['tz_offset'] = kargs.get('tz_offset', 120) self['daylight_save_time'] = kargs.get('daylight_save_time', 60) self['time_flags'] = kargs.get('time_flags', 2) @@ -1713,6 +1778,10 @@ def __init__(self, **kargs): stream.read(v4c.HL_BLOCK_SIZE), ) + if self['id'] != b'##HL': + message = 'Expected "##HL" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: self.address = 0 @@ -1739,12 +1808,35 @@ def __bytes__(self): class SourceInformation(dict): """SIBLOCK class""" - __slots__ = ['address', ] + __slots__ = ['address', 'name', 'path', 'comment'] def __init__(self, **kargs): super(SourceInformation, self).__init__() - if 'stream' in kargs: + self.name = self.path = self.comment = '' + + if 'raw_bytes' in kargs: + self.address = 0 + (self['id'], + self['reserved0'], + self['block_len'], + self['links_nr'], + self['name_addr'], + self['path_addr'], + self['comment_addr'], + self['source_type'], + self['bus_type'], + self['flags'], + self['reserved1']) = unpack( + v4c.FMT_SOURCE_INFORMATION, + 
kargs['raw_bytes'], + ) + + if self['id'] != b'##SI': + message = 'Expected "##SI" block but found "{}"' + raise MdfException(message.format(self['id'])) + + elif 'stream' in kargs: self.address = address = kargs['address'] stream = kargs['stream'] stream.seek(address, SEEK_START) @@ -1764,15 +1856,19 @@ def __init__(self, **kargs): stream.read(v4c.SI_BLOCK_SIZE), ) + if self['id'] != b'##SI': + message = 'Expected "##SI" block but found "{}"' + raise MdfException(message.format(self['id'])) + else: self.address = 0 self['id'] = b'##SI' self['reserved0'] = 0 self['block_len'] = v4c.SI_BLOCK_SIZE self['links_nr'] = 3 - self['name_addr'] = 0 - self['path_addr'] = 0 - self['comment_addr'] = 0 + self['name_addr'] = kargs.get('name_addr', 0) + self['path_addr'] = kargs.get('path_addr', 0) + self['comment_addr'] = kargs.get('comment_addr', 0) self['source_type'] = kargs.get('source_type', v4c.SOURCE_TOOL) self['bus_type'] = kargs.get('bus_type', v4c.BUS_TYPE_NONE) self['flags'] = 0 @@ -1791,7 +1887,8 @@ def __bytes__(self): class SignalDataBlock(dict): """SDBLOCK class""" - __slots__ = ['address',] + __slots__ = ['address', ] + def __init__(self, **kargs): super(SignalDataBlock, self).__init__() @@ -1809,6 +1906,10 @@ def __init__(self, **kargs): ) self['data'] = stream.read(self['block_len'] - v4c.COMMON_SIZE) + if self['id'] != b'##SD': + message = 'Expected "##SD" block but found "{}"' + raise MdfException(message.format(self['id'])) + except KeyError: self.address = 0 @@ -1854,6 +1955,10 @@ def __init__(self, **kargs): self['text'] = text = stream.read(size) + if self['id'] not in (b'##TX', b'##MD'): + message = 'Expected "##TX" or "##MD" block @{} but found "{}"' + raise MdfException(message.format(hex(address), self['id'])) + else: self.address = 0 @@ -1881,8 +1986,12 @@ def __init__(self, **kargs): align = size % 8 if align: self['block_len'] = size + v4c.COMMON_SIZE + 8 - align - elif text and text[-1] not in (0, b'\0'): - self['block_len'] = size + v4c.COMMON_SIZE + 8 + else: + if text: + if text[-1] not in (0, b'\0'): + self['block_len'] += 8 + else: + self['block_len'] += 8 def __bytes__(self): fmt = v4c.FMT_TEXT_BLOCK.format(self['block_len'] - v4c.COMMON_SIZE) diff --git a/asammdf/v4constants.py b/asammdf/v4_constants.py similarity index 97% rename from asammdf/v4constants.py rename to asammdf/v4_constants.py index 78c6b953f..1bb964aa8 100644 --- a/asammdf/v4constants.py +++ b/asammdf/v4_constants.py @@ -63,12 +63,19 @@ CONVERSION_TYPE_TTAB, } +CONVERSIONS_WITH_TEXTS = { + CONVERSION_TYPE_ALG, + CONVERSION_TYPE_RTABX, + CONVERSION_TYPE_TABX, + CONVERSION_TYPE_TRANS, + CONVERSION_TYPE_TTAB, +} + CA_TYPE_ARRAY = 0 CA_TYPE_SCALE_AXIS = 1 CA_TYPE_LOOKUP = 2 CA_STORAGE_TYPE_CN_TEMPLATE = 0 - SOURCE_ECU = 1 SOURCE_BUS = 2 SOURCE_IO = 3 @@ -254,7 +261,6 @@ FMT_CONVERSION_RAT_INIT = '<4Q2B3H8d' - FMT_HEADER_BLOCK = '<4sI9Q2H4B2Q' FMT_IDENTIFICATION_BLOCK = '<8s8s8s5H26s2H' diff --git a/asammdf/version.py b/asammdf/version.py index cd118a5ef..b29834508 100644 --- a/asammdf/version.py +++ b/asammdf/version.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- """ asammdf version module """ -__version__ = '2.8.1' +__version__ = '3.0.0dev' diff --git a/benchmarks/bench.py b/benchmarks/bench.py index d9fac5c20..b68440c75 100644 --- a/benchmarks/bench.py +++ b/benchmarks/bench.py @@ -28,7 +28,7 @@ PYVERSION = sys.version_info[0] if PYVERSION > 2: - from time import perf_counter + from time import perf_counter else: from time import clock as perf_counter @@ -118,8 +118,8 @@ def __exit__(self, type_, value, 
tracebackobj): return True -def open_mdf3(path, output, fmt, memory): - os.chdir(path) +def open_mdf3(output, fmt, memory): + with Timer('Open file', 'asammdf {} {} mdfv3'.format(asammdf_version, memory), fmt) as timer: @@ -127,8 +127,8 @@ def open_mdf3(path, output, fmt, memory): output.send([timer.output, timer.error]) -def open_mdf4(path, output, fmt, memory): - os.chdir(path) +def open_mdf4(output, fmt, memory): + with Timer('Open file', 'asammdf {} {} mdfv4'.format(asammdf_version, memory), fmt) as timer: @@ -136,8 +136,8 @@ def open_mdf4(path, output, fmt, memory): output.send([timer.output, timer.error]) -def save_mdf3(path, output, fmt, memory): - os.chdir(path) +def save_mdf3(output, fmt, memory): + x = MDF(r'test.mdf', memory=memory) with Timer('Save file', 'asammdf {} {} mdfv3'.format(asammdf_version, memory), @@ -146,8 +146,8 @@ def save_mdf3(path, output, fmt, memory): output.send([timer.output, timer.error]) -def save_mdf4(path, output, fmt, memory): - os.chdir(path) +def save_mdf4(output, fmt, memory): + x = MDF(r'test.mf4', memory=memory) with Timer('Save file', 'asammdf {} {} mdfv4'.format(asammdf_version, memory), @@ -156,8 +156,8 @@ def save_mdf4(path, output, fmt, memory): output.send([timer.output, timer.error]) -def get_all_mdf3(path, output, fmt, memory): - os.chdir(path) +def get_all_mdf3(output, fmt, memory): + x = MDF(r'test.mdf', memory=memory,) with Timer('Get all channels', 'asammdf {} {} mdfv3'.format(asammdf_version, memory), @@ -168,8 +168,8 @@ def get_all_mdf3(path, output, fmt, memory): output.send([timer.output, timer.error]) -def get_all_mdf4(path, output, fmt, memory): - os.chdir(path) +def get_all_mdf4(output, fmt, memory): + x = MDF(r'test.mf4', memory=memory,) with Timer('Get all channels', 'asammdf {} {} mdfv4'.format(asammdf_version, memory), @@ -180,8 +180,8 @@ def get_all_mdf4(path, output, fmt, memory): output.send([timer.output, timer.error]) -def convert_v3_v4(path, output, fmt, memory): - os.chdir(path) +def convert_v3_v4(output, fmt, memory): + with MDF(r'test.mdf', memory=memory,) as x: with Timer('Convert file', 'asammdf {} {} v3 to v4'.format( @@ -193,8 +193,8 @@ def convert_v3_v4(path, output, fmt, memory): output.send([timer.output, timer.error]) -def convert_v4_v3(path, output, fmt, memory): - os.chdir(path) +def convert_v4_v3(output, fmt, memory): + with MDF(r'test.mf4', memory=memory,) as x: with Timer('Convert file', 'asammdf {} {} v4 to v3'.format( @@ -207,8 +207,8 @@ def convert_v4_v3(path, output, fmt, memory): output.send([timer.output, timer.error]) -def merge_v3(path, output, fmt, memory): - os.chdir(path) +def merge_v3(output, fmt, memory): + files = [r'test.mdf', ] * 2 with Timer('Merge files', 'asammdf {} {} v3'.format(asammdf_version, memory), @@ -217,9 +217,9 @@ def merge_v3(path, output, fmt, memory): output.send([timer.output, timer.error]) -def merge_v4(path, output, fmt, memory): +def merge_v4(output, fmt, memory): files = [r'test.mf4', ] * 2 - os.chdir(path) + with Timer('Merge files', 'asammdf {} {} v4'.format(asammdf_version, memory), fmt) as timer: @@ -232,8 +232,8 @@ def merge_v4(path, output, fmt, memory): # -def open_reader3(path, output, fmt): - os.chdir(path) +def open_reader3(output, fmt): + with Timer('Open file', 'mdfreader {} mdfv3'.format(mdfreader_version), fmt) as timer: @@ -241,8 +241,8 @@ def open_reader3(path, output, fmt): output.send([timer.output, timer.error]) -def open_reader3_nodata(path, output, fmt): - os.chdir(path) +def open_reader3_nodata(output, fmt): + with Timer('Open file', 'mdfreader 
{} noDataLoading mdfv3'.format(mdfreader_version), fmt) as timer: @@ -250,8 +250,8 @@ def open_reader3_nodata(path, output, fmt): output.send([timer.output, timer.error]) -def open_reader3_compression(path, output, fmt): - os.chdir(path) +def open_reader3_compression(output, fmt): + with Timer('Open file', 'mdfreader {} compress mdfv3'.format(mdfreader_version), fmt) as timer: @@ -259,8 +259,8 @@ def open_reader3_compression(path, output, fmt): output.send([timer.output, timer.error]) -def open_reader4(path, output, fmt): - os.chdir(path) +def open_reader4(output, fmt): + with Timer('Open file', 'mdfreader {} mdfv4'.format(mdfreader_version), fmt) as timer: @@ -268,8 +268,8 @@ def open_reader4(path, output, fmt): output.send([timer.output, timer.error]) -def open_reader4_nodata(path, output, fmt): - os.chdir(path) +def open_reader4_nodata(output, fmt): + with Timer('Open file', 'mdfreader {} noDataLoading mdfv4'.format(mdfreader_version), fmt) as timer: @@ -277,8 +277,8 @@ def open_reader4_nodata(path, output, fmt): output.send([timer.output, timer.error]) -def open_reader4_compression(path, output, fmt): - os.chdir(path) +def open_reader4_compression(output, fmt): + with Timer('Open file', 'mdfreader {} compress mdfv4'.format(mdfreader_version), fmt) as timer: @@ -286,8 +286,8 @@ def open_reader4_compression(path, output, fmt): output.send([timer.output, timer.error]) -def save_reader3(path, output, fmt): - os.chdir(path) +def save_reader3(output, fmt): + x = MDFreader(r'test.mdf') with Timer('Save file', 'mdfreader {} mdfv3'.format(mdfreader_version), @@ -296,8 +296,8 @@ def save_reader3(path, output, fmt): output.send([timer.output, timer.error]) -def save_reader3_nodata(path, output, fmt): - os.chdir(path) +def save_reader3_nodata(output, fmt): + x = MDFreader(r'test.mdf', noDataLoading=True) with Timer('Save file', 'mdfreader {} noDataLoading mdfv3'.format(mdfreader_version), @@ -306,18 +306,22 @@ def save_reader3_nodata(path, output, fmt): output.send([timer.output, timer.error]) -def save_reader3_compression(path, output, fmt): - os.chdir(path) - x = MDFreader(r'test.mdf', compression='blosc') +def save_reader3_compression(output, fmt): with Timer('Save file', 'mdfreader {} compress mdfv3'.format(mdfreader_version), - fmt) as timer: - x.write(r'x.mdf') - output.send([timer.output, timer.error]) + fmt) as outer_timer: + x = MDFreader(r'test.mdf', compression='blosc') + with Timer('Save file', + 'mdfreader {} compress mdfv3'.format(mdfreader_version), + fmt) as timer: + x.write(r'x.mdf') + output.send([timer.output, timer.error]) + if outer_timer.error: + output.send([outer_timer.output, outer_timer.error]) -def save_reader4(path, output, fmt): - os.chdir(path) +def save_reader4(output, fmt): + x = MDFreader(r'test.mf4') with Timer('Save file', 'mdfreader {} mdfv4'.format(mdfreader_version), @@ -326,8 +330,8 @@ def save_reader4(path, output, fmt): output.send([timer.output, timer.error]) -def save_reader4_nodata(path, output, fmt): - os.chdir(path) +def save_reader4_nodata(output, fmt): + x = MDFreader(r'test.mf4', noDataLoading=True) with Timer('Save file', 'mdfreader {} noDataLoading mdfv4'.format(mdfreader_version), @@ -336,8 +340,8 @@ def save_reader4_nodata(path, output, fmt): output.send([timer.output, timer.error]) -def save_reader4_compression(path, output, fmt): - os.chdir(path) +def save_reader4_compression(output, fmt): + x = MDFreader(r'test.mf4', compression='blosc') with Timer('Save file', 'mdfreader {} compress mdfv4'.format(mdfreader_version), @@ -346,8 +350,8 @@ def
save_reader4_compression(path, output, fmt): output.send([timer.output, timer.error]) -def get_all_reader3(path, output, fmt): - os.chdir(path) +def get_all_reader3(output, fmt): + x = MDFreader(r'test.mdf') with Timer('Get all channels', 'mdfreader {} mdfv3'.format(mdfreader_version), @@ -357,8 +361,8 @@ def get_all_reader3(path, output, fmt): output.send([timer.output, timer.error]) -def get_all_reader3_nodata(path, output, fmt): - os.chdir(path) +def get_all_reader3_nodata(output, fmt): + x = MDFreader(r'test.mdf', noDataLoading=True) with Timer('Get all channels', 'mdfreader {} nodata mdfv3'.format(mdfreader_version), @@ -368,22 +372,22 @@ def get_all_reader3_nodata(path, output, fmt): output.send([timer.output, timer.error]) -def get_all_reader3_compression(path, output, fmt): - os.chdir(path) +def get_all_reader3_compression(output, fmt): + x = MDFreader(r'test.mdf', compression='blosc') with Timer('Get all channels', 'mdfreader {} compress mdfv3'.format(mdfreader_version), fmt) as timer: for s in x: x.getChannelData(s) output.send([timer.output, timer.error]) -def get_all_reader4(path, output, fmt): - os.chdir(path) +def get_all_reader4(output, fmt): + x = MDFreader(r'test.mf4') with Timer('Get all channels', 'mdfreader {} mdfv4'.format(mdfreader_version), @@ -393,8 +397,8 @@ def get_all_reader4(path, output, fmt): output.send([timer.output, timer.error]) -def get_all_reader4_nodata(path, output, fmt): - os.chdir(path) +def get_all_reader4_nodata(output, fmt): + x = MDFreader(r'test.mf4', noDataLoading=True) with Timer('Get all channels', 'mdfreader {} nodata mdfv4'.format(mdfreader_version), @@ -404,8 +408,8 @@ def get_all_reader4_nodata(path, output, fmt): output.send([timer.output, timer.error]) -def get_all_reader4_compression(path, output, fmt): - os.chdir(path) +def get_all_reader4_compression(output, fmt): + x = MDFreader(r'test.mf4', compression='blosc') with Timer('Get all channels', 'mdfreader {} compress mdfv4'.format(mdfreader_version), @@ -415,8 +419,8 @@ def get_all_reader4_compression(path, output, fmt): output.send([timer.output, timer.error]) -def merge_reader_v3(path, output, fmt): - os.chdir(path) +def merge_reader_v3(output, fmt): + files = [r'test.mdf', ] * 2 with Timer('Merge files', 'mdfreader {} v3'.format(mdfreader_version), @@ -429,8 +433,8 @@ def merge_reader_v3(path, output, fmt): output.send([timer.output, timer.error]) -def merge_reader_v3_compress(path, output, fmt): - os.chdir(path) +def merge_reader_v3_compress(output, fmt): + files = [r'test.mdf', ] * 2 with Timer('Merge files', 'mdfreader {} compress v3'.format(mdfreader_version), @@ -443,9 +447,23 @@ def merge_reader_v3_compress(path, output, fmt): output.send([timer.output, timer.error]) -def merge_reader_v4(path, output, fmt): +def merge_reader_v3_nodata(output, fmt): + + files = [r'test.mdf', ] * 2 + with Timer('Merge files', + 'mdfreader {} nodata v3'.format(mdfreader_version), + fmt) as timer: + x1 = MDFreader(files[0], noDataLoading=True) + x1.resample(0.01) + x2 = MDFreader(files[1], noDataLoading=True) + x2.resample(0.01) + x1.mergeMdf(x2) + output.send([timer.output, timer.error]) + + +def merge_reader_v4(output, fmt): files = [r'test.mf4', ] * 2 - os.chdir(path) + with Timer('Merge files', 'mdfreader {} v4'.format(mdfreader_version), fmt) as timer: @@ -457,8 +475,8 @@ def merge_reader_v4(path, output, fmt): output.send([timer.output, timer.error]) -def merge_reader_v4_compress(path, output, fmt): - os.chdir(path) +def
merge_reader_v4_compress(output, fmt): + files = [r'test.mf4', ] * 2 with Timer('Merge files', 'mdfreader {} compress v4'.format(mdfreader_version), @@ -470,6 +488,19 @@ def merge_reader_v4_compress(path, output, fmt): x1.mergeMdf(x2) output.send([timer.output, timer.error]) +def merge_reader_v4_nodata(output, fmt): + + files = [r'test.mf4', ] * 2 + with Timer('Merge files', + 'mdfreader {} nodata v4'.format(mdfreader_version), + fmt) as timer: + x1 = MDFreader(files[0], noDataLoading=True) + x1.resample(0.01) + x2 = MDFreader(files[1], noDataLoading=True) + x2.resample(0.01) + x1.mergeMdf(x2) + output.send([timer.output, timer.error]) + # # utility functions # @@ -498,14 +529,13 @@ def table_end(fmt='rst'): return ['', ] -def main(path, text_output, fmt): +def main(text_output, fmt): + if os.path.dirname(__file__): + os.chdir(os.path.dirname(__file__)) listen, send = multiprocessing.Pipe() output = MyList() errors = [] - if not path: - path = os.path.dirname(__file__) - installed_ram = round(psutil.virtual_memory().total / 1024 / 1024 / 1024) output.append('Benchmark environment\n') @@ -537,20 +567,20 @@ def main(path, text_output, fmt): partial(open_mdf3, memory='low'), partial(open_mdf3, memory='minimum'), open_reader3, -# open_reader3_compression, + open_reader3_compression, open_reader3_nodata, partial(open_mdf4, memory='full'), partial(open_mdf4, memory='low'), partial(open_mdf4, memory='minimum'), open_reader4, -# open_reader4_compression, + open_reader4_compression, open_reader4_nodata, ) if tests: output.extend(table_header('Open file', fmt)) for func in tests: - thr = multiprocessing.Process(target=func, args=(path, send, fmt)) + thr = multiprocessing.Process(target=func, args=(send, fmt)) thr.start() thr.join() result, err = listen.recv() @@ -564,19 +594,19 @@ def main(path, text_output, fmt): partial(save_mdf3, memory='minimum'), save_reader3, save_reader3_nodata, -# save_reader3_compression, + save_reader3_compression, partial(save_mdf4, memory='full'), partial(save_mdf4, memory='low'), partial(save_mdf4, memory='minimum'), save_reader4, save_reader4_nodata, -# save_reader4_compression, + save_reader4_compression, ) if tests: output.extend(table_header('Save file', fmt)) for func in tests: - thr = multiprocessing.Process(target=func, args=(path, send, fmt)) + thr = multiprocessing.Process(target=func, args=(send, fmt)) thr.start() thr.join() result, err = listen.recv() @@ -589,20 +619,20 @@ def main(path, text_output, fmt): partial(get_all_mdf3, memory='low'), partial(get_all_mdf3, memory='minimum'), get_all_reader3, -# get_all_reader3_nodata, -# get_all_reader3_compression, + get_all_reader3_nodata, + get_all_reader3_compression, partial(get_all_mdf4, memory='full'), partial(get_all_mdf4, memory='low'), partial(get_all_mdf4, memory='minimum'), get_all_reader4, -# get_all_reader4_nodata, -# get_all_reader4_compression, + get_all_reader4_nodata, + get_all_reader4_compression, ) if tests: output.extend(table_header('Get all channels (36424 calls)', fmt)) for func in tests: - thr = multiprocessing.Process(target=func, args=(path, send, fmt)) + thr = multiprocessing.Process(target=func, args=(send, fmt)) thr.start() thr.join() result, err = listen.recv() @@ -622,7 +652,7 @@ def main(path, text_output, fmt): if tests: output.extend(table_header('Convert file', fmt)) for func in tests: - thr = multiprocessing.Process(target=func, args=(path, send, fmt)) + thr = multiprocessing.Process(target=func, args=(send, fmt)) thr.start() thr.join() result, err = listen.recv() @@ -635,18 
+665,20 @@ def main(path, text_output, fmt): partial(merge_v3, memory='low'), partial(merge_v3, memory='minimum'), merge_reader_v3, -# merge_reader_v3_compress, + merge_reader_v3_compress, + merge_reader_v3_nodata, partial(merge_v4, memory='full'), partial(merge_v4, memory='low'), partial(merge_v4, memory='minimum'), merge_reader_v4, -# merge_reader_v4_compress, + merge_reader_v4_nodata, + merge_reader_v4_compress, ) if tests: output.extend(table_header('Merge files', fmt)) for func in tests: - thr = multiprocessing.Process(target=func, args=(path, send, fmt)) + thr = multiprocessing.Process(target=func, args=(send, fmt)) thr.start() thr.join() result, err = listen.recv() @@ -667,7 +699,7 @@ def main(path, text_output, fmt): with open(file, 'w') as out: out.write('\n'.join(output)) - os.chdir(path) + for file in ('x.mdf', 'x.mf4'): try: os.remove(file) @@ -699,4 +731,4 @@ def _cmd_line_parser(): cmd_parser = _cmd_line_parser() args = cmd_parser.parse_args(sys.argv[1:]) - main(args.path, args.text_output, args.format) + main(args.text_output, args.format) diff --git a/documentation/examples.rst b/documentation/examples.rst index 0573c08e3..541e3e3c4 100644 --- a/documentation/examples.rst +++ b/documentation/examples.rst @@ -7,7 +7,7 @@ - + .. role:: red .. role:: blue .. role:: green @@ -88,11 +88,11 @@ Working with MDF # save using zipped transpose deflate blocks mdf4.save('out.mf4', compression=2, overwrite=True) - - + + Working with Signal ------------------- - + .. code-block:: python from __future__ import print_function, division diff --git a/documentation/index.rst b/documentation/index.rst index 6baa206ad..8132feea0 100644 --- a/documentation/index.rst +++ b/documentation/index.rst @@ -25,7 +25,7 @@ Features * create new mdf files from scratch * append new channels -* read unsorted MDF v3 and v4 files +* read unsorted MDF v2, v3 and v4 files * filter a subset of channels from original mdf file * cut measurement to specified time interval * convert to different mdf version diff --git a/documentation/mdf.rst b/documentation/mdf.rst index 75f952ac1..3f6bea118 100644 --- a/documentation/mdf.rst +++ b/documentation/mdf.rst @@ -22,8 +22,8 @@ MDF === This class acts as a proxy for the MDF3 and MDF4 classes. -All attribute access is delegated to the underlying *_mdf* attribute (MDF3 or MDF4 object). -See MDF3 and MDF4 for available extra methods. +All attribute access is delegated to the underlying *_mdf* attribute (MDF2, MDF3 or MDF4 object). +See MDF3 and MDF4 for available extra methods (MDF2 and MDF3 share the same implementation). An empty MDF file is created if the *name* argument is not provided. If the *name* argument is provided then the file must exist in the filesystem, otherwise an exception is raised. @@ -40,13 +40,12 @@ Best practice is to use the MDF as a context manager. This way all resources are :members: -MDF2, MDF3 and MDF4 classes ---------------------------- +MDF3 and MDF4 classes +--------------------- .. toctree:: :maxdepth: 1 - mdf2 mdf3 mdf4 diff --git a/documentation/mdf2.rst b/documentation/mdf2.rst deleted file mode 100644 index 2e9393308..000000000 --- a/documentation/mdf2.rst +++ /dev/null @@ -1,76 +0,0 @@ -.. raw:: html - - - - - - - - - -.. role:: red -.. role:: blue -.. role:: green -.. role:: cyan -.. role:: magenta -.. role:: orange -.. role:: brown - -.. _mdf2: - -MDF2 -==== - -asammdf tries to emulate the mdf structure using Python builtin data types. - -The *header* attibute is an OrderedDict that holds the file metadata. 
- -The *groups* attribute is a dictionary list with the following keys: - -* data_group : DataGroup object -* channel_group : ChannelGroup object -* channels : list of Channel objects with the same order as found in the mdf file -* channel_conversions : list of ChannelConversion objects in 1-to-1 relation with the channel list -* channel_sources : list of SourceInformation objects in 1-to-1 relation with the channels list -* chanel_dependencies : list of ChannelDependency objects in a 1-to-1 relation with the channel list -* data_block : DataBlock object -* texts : dictionay containing TextBlock objects used throughout the mdf - - * channels : list of dictionaries that contain TextBlock objects ralated to each channel - - * long_name_addr : channel long name - * comment_addr : channel comment - * display_name_addr : channel display name - - * channel group : list of dictionaries that contain TextBlock objects ralated to each channel group - - * comment_addr : channel group comment - - * conversion_tab : list of dictionaries that contain TextBlock objects ralated to VATB and VTABR channel conversions - - * text_{n} : n-th text of the VTABR conversion - -* sorted : bool flag to indicate if the source file was sorted; it is used when `memory` is `low` or `minimum` -* size : data block size; used for lazy laoding of measured data -* record_size : dict of record ID -> record size pairs - -The *file_history* attribute is a TextBlock object. - -The *channel_db* attibute is a dictionary that holds the *(data group index, channel index)* pair for all signals. This is used to speed up the *get_signal_by_name* method. - -The *master_db* attibute is a dictionary that holds the *channel index* of the master channel for all data groups. This is used to speed up the *get_signal_by_name* method. - -API ---- - -.. autoclass:: asammdf.mdf2.MDF2 - :members: - :noindex: - -MDF version 2 blocks --------------------- - -.. toctree:: - :maxdepth: 2 - - v2blocks diff --git a/documentation/mdf3.rst b/documentation/mdf3.rst index 07ca52ab2..7f3f834b7 100644 --- a/documentation/mdf3.rst +++ b/documentation/mdf3.rst @@ -16,7 +16,7 @@ .. role:: orange .. role:: brown -.. _mdf3: +.. _MDF3: MDF3 ==== @@ -63,14 +63,14 @@ The *master_db* attibute is a dictionary that holds the *channel index* of the API --- -.. autoclass:: asammdf.mdf3.MDF3 +.. autoclass:: asammdf.mdf_v3.MDF3 :members: :noindex: -MDF version 3 blocks --------------------- +MDF version 2 & 3 blocks +------------------------ .. toctree:: :maxdepth: 2 - v3blocks + v2v3blocks diff --git a/documentation/mdf4.rst b/documentation/mdf4.rst index 6c9f6ea56..20c607d94 100644 --- a/documentation/mdf4.rst +++ b/documentation/mdf4.rst @@ -16,7 +16,7 @@ .. role:: orange .. role:: brown -.. _mdf4: +.. _MDF4: MDF4 ==== @@ -72,7 +72,7 @@ The *master_db* attibute is a dictionary that holds the *channel index* of the API --- -.. autoclass:: asammdf.mdf4.MDF4 +.. autoclass:: asammdf.mdf_v4.MDF4 :members: :noindex: diff --git a/documentation/v2blocks.rst b/documentation/v2v3blocks.rst similarity index 62% rename from documentation/v2blocks.rst rename to documentation/v2v3blocks.rst index 0fd80dab0..0186a37ed 100644 --- a/documentation/v2blocks.rst +++ b/documentation/v2v3blocks.rst @@ -22,50 +22,50 @@ The following classes implement different MDF version3 blocks. Channel Class ------------- -.. autoclass:: asammdf.mdf2.Channel +.. autoclass:: asammdf.v2_v3_blocks.Channel ChannelConversion Class ----------------------- -.. autoclass:: asammdf.mdf2.ChannelConversion +.. 
autoclass:: asammdf.v2_v3_blocks.ChannelConversion ChannelDependency Class ----------------------- -.. autoclass:: asammdf.mdf2.ChannelDependency +.. autoclass:: asammdf.v2_v3_blocks.ChannelDependency ChannelExtension Class ---------------------- -.. autoclass:: asammdf.mdf2.ChannelExtension +.. autoclass:: asammdf.v2_v3_blocks.ChannelExtension ChannelGroup Class ------------------ -.. autoclass:: asammdf.mdf2.ChannelGroup +.. autoclass:: asammdf.v2_v3_blocks.ChannelGroup DataGroup Class --------------- -.. autoclass:: asammdf.mdf2.DataGroup +.. autoclass:: asammdf.v2_v3_blocks.DataGroup FileIdentificationBlock Class ----------------------------- -.. autoclass:: asammdf.mdf2.FileIdentificationBlock +.. autoclass:: asammdf.v2_v3_blocks.FileIdentificationBlock HeaderBlock Class ----------------- -.. autoclass:: asammdf.mdf2.HeaderBlock +.. autoclass:: asammdf.v2_v3_blocks.HeaderBlock ProgramBlock Class ------------------ -.. autoclass:: asammdf.v2blocks.ProgramBlock +.. autoclass:: asammdf.v2_v3_blocks.ProgramBlock SampleReduction Class --------------------- -.. autoclass:: asammdf.v2blocks.SampleReduction +.. autoclass:: asammdf.v2_v3_blocks.SampleReduction :members: TextBlock Class --------------- -.. autoclass:: asammdf.mdf2.TextBlock +.. autoclass:: asammdf.v2_v3_blocks.TextBlock TriggerBlock Class ------------------ -.. autoclass:: asammdf.mdf2.TriggerBlock +.. autoclass:: asammdf.v2_v3_blocks.TriggerBlock diff --git a/documentation/v3blocks.rst b/documentation/v3blocks.rst deleted file mode 100644 index 167ac3270..000000000 --- a/documentation/v3blocks.rst +++ /dev/null @@ -1,71 +0,0 @@ -.. raw:: html - - - - - - - - - -.. role:: red -.. role:: blue -.. role:: green -.. role:: cyan -.. role:: magenta -.. role:: orange -.. role:: brown - -.. _v3blocks: - -The following classes implement different MDF version3 blocks. - -Channel Class -------------- -.. autoclass:: asammdf.mdf3.Channel - - -ChannelConversion Class ------------------------ -.. autoclass:: asammdf.mdf3.ChannelConversion - -ChannelDependency Class ------------------------ -.. autoclass:: asammdf.mdf3.ChannelDependency - -ChannelExtension Class ----------------------- -.. autoclass:: asammdf.mdf3.ChannelExtension - -ChannelGroup Class ------------------- -.. autoclass:: asammdf.mdf3.ChannelGroup - -DataGroup Class ---------------- -.. autoclass:: asammdf.mdf3.DataGroup - -FileIdentificationBlock Class ------------------------------ -.. autoclass:: asammdf.mdf3.FileIdentificationBlock - -HeaderBlock Class ------------------ -.. autoclass:: asammdf.mdf3.HeaderBlock - -ProgramBlock Class ------------------- -.. autoclass:: asammdf.v3blocks.ProgramBlock - -SampleReduction Class ---------------------- -.. autoclass:: asammdf.v3blocks.SampleReduction - :members: - -TextBlock Class ---------------- -.. autoclass:: asammdf.mdf3.TextBlock - -TriggerBlock Class ------------------- -.. autoclass:: asammdf.mdf3.TriggerBlock diff --git a/documentation/v4blocks.rst b/documentation/v4blocks.rst index 2ba9d3861..e5bc48299 100644 --- a/documentation/v4blocks.rst +++ b/documentation/v4blocks.rst @@ -18,64 +18,64 @@ .. _v4blocks: -The following classes implement different MDF version3 blocks. +The following classes implement different MDF version4 blocks. AttachmentBlock Class --------------------- -.. autoclass:: asammdf.mdf4.AttachmentBlock +.. autoclass:: asammdf.v4_blocks.AttachmentBlock :members: Channel Class ------------- -.. autoclass:: asammdf.mdf4.Channel +.. 
autoclass:: asammdf.v4_blocks.Channel :members: ChannelConversion Class ----------------------- -.. autoclass:: asammdf.mdf4.ChannelConversion +.. autoclass:: asammdf.v4_blocks.ChannelConversion :members: ChannelGroup Class ------------------ -.. autoclass:: asammdf.mdf4.ChannelGroup +.. autoclass:: asammdf.v4_blocks.ChannelGroup :members: DataGroup Class --------------- -.. autoclass:: asammdf.mdf4.DataGroup +.. autoclass:: asammdf.v4_blocks.DataGroup :members: DataList Class -------------- -.. autoclass:: asammdf.mdf4.DataList +.. autoclass:: asammdf.v4_blocks.DataList :members: DataBlock Class --------------- -.. autoclass:: asammdf.mdf4.DataBlock +.. autoclass:: asammdf.v4_blocks.DataBlock :members: FileIdentificationBlock Class ----------------------------- -.. autoclass:: asammdf.mdf4.FileIdentificationBlock +.. autoclass:: asammdf.v4_blocks.FileIdentificationBlock :members: HeaderBlock Class ----------------- -.. autoclass:: asammdf.mdf4.HeaderBlock +.. autoclass:: asammdf.v4_blocks.HeaderBlock :members: SourceInformation Class ----------------------- -.. autoclass:: asammdf.mdf4.SourceInformation +.. autoclass:: asammdf.v4_blocks.SourceInformation :members: FileHistory Class ----------------- -.. autoclass:: asammdf.mdf4.FileHistory +.. autoclass:: asammdf.v4_blocks.FileHistory :members: TextBlock Class --------------- -.. autoclass:: asammdf.mdf4.TextBlock +.. autoclass:: asammdf.v4_blocks.TextBlock :members: diff --git a/pip_requirements_tests.txt b/pip_requirements_tests.txt index 84a446792..eb43dbca6 100644 --- a/pip_requirements_tests.txt +++ b/pip_requirements_tests.txt @@ -1,2 +1,4 @@ xmlrunner +coverage +codacy-coverage numpy>=1.13.1 diff --git a/test/test_mdf.py b/test/test_mdf.py index 5b68ce483..c690c3ea2 100644 --- a/test/test_mdf.py +++ b/test/test_mdf.py @@ -1,16 +1,25 @@ #!/usr/bin/env python from __future__ import print_function import os +import random import sys import unittest import shutil import urllib +from itertools import product from zipfile import ZipFile + import numpy as np -from utils import MEMORY -from asammdf import MDF, SUPPORTED_VERSIONS +from utils import ( + CHANNELS_DEMO, + CHANNELS_ARRAY, + COMMENTS, + MEMORY, + UNITS, +) +from asammdf import MDF, SUPPORTED_VERSIONS, configure CHANNEL_LEN = 100000 @@ -24,75 +33,237 @@ def test_measurement(self): def setUpClass(cls): PYVERSION = sys.version_info[0] - url = 'https://github.com/danielhrisca/asammdf/files/1565237/test.files.zip' + url = 'https://github.com/danielhrisca/asammdf/files/1594267/test.demo.zip' + if PYVERSION == 3: + urllib.request.urlretrieve(url, 'test.zip') + else: + urllib.urlretrieve(url, 'test.zip') + ZipFile(r'test.zip').extractall('tmpdir_demo') + + url = 'https://github.com/danielhrisca/asammdf/files/1592123/test.arrays.zip' if PYVERSION == 3: urllib.request.urlretrieve(url, 'test.zip') else: urllib.urlretrieve(url, 'test.zip') - ZipFile(r'test.zip').extractall('tmpdir') + ZipFile(r'test.zip').extractall('tmpdir_array') + + configure( + integer_compacting=False, + split_data_blocks=False, + split_threshold=260, + overwrite=True, + ) @classmethod def tearDownClass(cls): - shutil.rmtree('tmpdir', True) + shutil.rmtree('tmpdir_demo', True) + shutil.rmtree('tmpdir_array', True) os.remove('test.zip') - if os.path.isfile('tmp'): - os.remove('tmp') + for filename in os.listdir(os.getcwd()): + if os.path.isfile(filename) and filename.startswith('tmp'): + os.remove(filename) def test_read(self): + print("MDF read tests") - for mdf in os.listdir('tmpdir'): + ret = True + + for enable in 
(True, False): + configure(enable, enable) + for mdf in os.listdir('tmpdir_demo'): + for memory in MEMORY: + with MDF(os.path.join('tmpdir_demo', mdf), memory=memory) as input_file: + if input_file.version == '2.00': + continue + for name in set(input_file.channels_db) - {'time', 't'}: + signal = input_file.get(name) + original_samples = CHANNELS_DEMO[name] + if signal.samples.dtype.kind == 'f': + signal = signal.astype(np.float32) + res = np.array_equal(signal.samples, original_samples) + if not res: + ret = False + + self.assertTrue(ret) + + def test_get_channel_comment_v4(self): + print("MDF get channel comment tests") + + ret = True + + for mdf in os.listdir('tmpdir_demo'): + for memory in MEMORY: + with MDF(os.path.join('tmpdir_demo', mdf), memory=memory) as input_file: + if input_file.version < '4.00': + continue + print(mdf, memory) + for channel_name, original_comment in COMMENTS.items(): + comment = input_file.get_channel_comment(channel_name) + if comment != original_comment: + print(channel_name, original_comment, comment) + ret = False + + self.assertTrue(ret) + + def test_get_channel_units(self): + print("MDF get channel units tests") + + ret = True + + for mdf in os.listdir('tmpdir_demo'): for memory in MEMORY: - MDF(os.path.join('tmpdir', mdf), memory=memory).close() + with MDF(os.path.join('tmpdir_demo', mdf), memory=memory) as input_file: + if input_file.version == '2.00': + continue + print(mdf, memory) + for channel_name, original_unit in UNITS.items(): + comment = input_file.get_channel_unit(channel_name) + if comment != original_unit: + print(channel_name, original_unit, comment) + ret = False + + self.assertTrue(ret) + + + def test_read_array(self): + + print("MDF read array tests") + + ret = True + + for enable in (True, False): + configure(enable, enable) + + for mdf in os.listdir('tmpdir_array'): + for memory in MEMORY: + with MDF(os.path.join('tmpdir_array', mdf), memory=memory) as input_file: + if input_file.version == '2.00': + continue + for name in set(input_file.channels_db) - {'time', 't'}: + signal = input_file.get(name) + original_samples = CHANNELS_ARRAY[name] + res = np.array_equal(signal.samples, original_samples) + if not res: + ret = False + + self.assertTrue(ret) def test_convert(self): print("MDF convert tests") + for enable in (True, ): + configure(enable, enable) + + for out in SUPPORTED_VERSIONS[1:]: + for mdfname in os.listdir('tmpdir_demo'): + for memory in MEMORY[-1:]: + input_file = os.path.join('tmpdir_demo', mdfname) + if MDF(input_file).version == '2.00': + continue + print(input_file, memory, out) + with MDF(input_file, memory=memory) as mdf: + outfile = mdf.convert(out, memory=memory).save('tmp', + overwrite=True) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for name in set(mdf.channels_db) - {'t', 'time'}: + original = mdf.get(name) + converted = mdf2.get(name) + if not np.array_equal( + original.samples, + converted.samples): + print(name, original, converted) + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = False + + self.assertTrue(equal) + + def test_merge(self): + print("MDF merge tests") + + configure(False, False) + + for out in SUPPORTED_VERSIONS: + for mdfname in os.listdir('tmpdir_demo'): + for memory in MEMORY[:1]: + + input_file = os.path.join('tmpdir_demo', mdfname) + files = [input_file, ] * 4 + + outfile = MDF.merge(files, out, memory).save('tmp', overwrite=True) + + equal = True + + with MDF(input_file, 
memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for i, group in enumerate(mdf.groups): + for j, _ in enumerate(group['channels'][1:], 1): + original = mdf.get(group=i, index=j) + converted = mdf2.get(group=i, index=j) + if not np.array_equal( + np.tile(original.samples, 4), + converted.samples): + equal = False + + self.assertTrue(equal) + + configure(True, True) + for out in SUPPORTED_VERSIONS[1:]: - for mdfname in os.listdir('tmpdir'): + for mdfname in os.listdir('tmpdir_demo'): for memory in MEMORY: - input_file = os.path.join('tmpdir', mdfname) - with MDF(input_file, memory=memory) as mdf: - mdf.convert(out, memory=memory).save('tmp', - overwrite=True) + input_file = os.path.join('tmpdir_demo', mdfname) + if '2.00' in input_file: + continue + files = [input_file, ] * 4 + + outfile = MDF.merge(files, out, memory).save('tmp', overwrite=True) equal = True with MDF(input_file, memory=memory) as mdf, \ - MDF('tmp', memory=memory) as mdf2: + MDF(outfile, memory=memory) as mdf2: for name in set(mdf.channels_db) - {'t', 'time'}: original = mdf.get(name) converted = mdf2.get(name) if not np.array_equal( - original.samples, + np.tile(original.samples, 4), converted.samples): - equal = False - if not np.array_equal( - original.timestamps, - converted.timestamps): + print(original, converted) equal = False self.assertTrue(equal) - def test_merge(self): - print("MDF merge tests") + def test_merge_array(self): + print("MDF merge array tests") - for out in SUPPORTED_VERSIONS: - for mdfname in os.listdir('tmpdir'): + configure(False, False) + + for out in (version for version in SUPPORTED_VERSIONS if version >= '4.00'): + for mdfname in os.listdir('tmpdir_array'): for memory in MEMORY: - input_file = os.path.join('tmpdir', mdfname) + input_file = os.path.join('tmpdir_array', mdfname) files = [input_file, ] * 4 - MDF.merge(files, out, memory).save('tmp', overwrite=True) + outfile = MDF.merge(files, out, memory).save('tmp', overwrite=True) equal = True with MDF(input_file, memory=memory) as mdf, \ - MDF('tmp', memory=memory) as mdf2: + MDF(outfile, memory=memory) as mdf2: for i, group in enumerate(mdf.groups): - for j, channel in enumerate(group['channels'][1:], 1): + for j, _ in enumerate(group['channels'][1:], 1): original = mdf.get(group=i, index=j) converted = mdf2.get(group=i, index=j) if not np.array_equal( @@ -102,6 +273,583 @@ def test_merge(self): self.assertTrue(equal) + configure(True, True) + + for out in (version for version in SUPPORTED_VERSIONS if version >= '4.00'): + for mdfname in os.listdir('tmpdir_array'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_array', mdfname) + files = [input_file, ] * 4 + + outfile = MDF.merge(files, out, memory).save('tmp', overwrite=True) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for name in set(mdf.channels_db) - {'t', 'time'}: + original = mdf.get(name) + converted = mdf2.get(name) + if not np.array_equal( + np.tile(original.samples, 4), + converted.samples): + equal = False + + self.assertTrue(equal) + + def test_cut_absolute(self): + print("MDF cut absolute tests") + + configure(False, False) + + for mdfname in os.listdir('tmpdir_demo'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_demo', mdfname) + + print(input_file, memory) + + outfile1 = MDF(input_file, memory=memory).cut(stop=2).save('tmp1', overwrite=True) + outfile2 = MDF(input_file, memory=memory).cut(start=2, stop=6).save('tmp2', overwrite=True) + outfile3 = MDF(input_file, 
memory=memory).cut(start=6).save('tmp3', overwrite=True) + + outfile = MDF.merge( + [outfile1, outfile2, outfile3], + MDF(input_file, memory='minimum').version, + ).save('tmp', overwrite=True) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for i, group in enumerate(mdf.groups): + for j, _ in enumerate(group['channels'][1:], 1): + original = mdf.get(group=i, index=j) + converted = mdf2.get(group=i, index=j) + if not np.array_equal( + original.samples, + converted.samples): + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = False + + self.assertTrue(equal) + + configure(True, True) + + for mdfname in os.listdir('tmpdir_demo'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_demo', mdfname) + if '2.00' in input_file: + continue + + outfile1 = MDF(input_file, memory=memory).cut(stop=2).save('tmp1', overwrite=True) + outfile2 = MDF(input_file, memory=memory).cut(start=2, stop=6).save('tmp2', overwrite=True) + outfile3 = MDF(input_file, memory=memory).cut(start=6).save('tmp3', overwrite=True) + + outfile = MDF.merge( + [outfile1, outfile2, outfile3], + MDF(input_file, memory='minimum').version, + ).save('tmp', overwrite=True) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for name in set(mdf.channels_db) - {'time', 't'}: + original = mdf.get(name) + converted = mdf2.get(name) + if not np.array_equal( + original.samples, + converted.samples): + print(original, converted, '='*80, sep='\n') + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = False + + self.assertTrue(equal) + + + def test_cut_absolute_array(self): + print("MDF cut absolute array tests") + + configure(False, False) + + for mdfname in os.listdir('tmpdir_array'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_array', mdfname) + + outfile1 = MDF(input_file, memory=memory).cut(stop=2.1).save('tmp1', overwrite=True) + outfile2 = MDF(input_file, memory=memory).cut(start=2.1, stop=6.1).save('tmp2', overwrite=True) + outfile3 = MDF(input_file, memory=memory).cut(start=6.1).save('tmp3', overwrite=True) + + outfile = MDF.merge( + [outfile1, outfile2, outfile3], + MDF(input_file, memory='minimum').version, + ).save('tmp', overwrite=True) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for i, group in enumerate(mdf.groups): + for j, _ in enumerate(group['channels'][1:], 1): + original = mdf.get(group=i, index=j) + converted = mdf2.get(group=i, index=j) + if not np.array_equal( + original.samples, + converted.samples): + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = False + + self.assertTrue(equal) + + configure(True, True) + + for mdfname in os.listdir('tmpdir_array'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_array', mdfname) + + outfile1 = MDF(input_file, memory=memory).cut(stop=2.1).save('tmp1', overwrite=True) + outfile2 = MDF(input_file, memory=memory).cut(start=2.1, stop=6.1).save('tmp2', overwrite=True) + outfile3 = MDF(input_file, memory=memory).cut(start=6.1).save('tmp3', overwrite=True) + + outfile = MDF.merge( + [outfile1, outfile2, outfile3], + MDF(input_file, memory='minimum').version, + ).save('tmp', overwrite=True) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for name in set(mdf.channels_db) - 
{'time', 't'}: + original = mdf.get(name) + converted = mdf2.get(name) + if not np.array_equal( + original.samples, + converted.samples): + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = False + + self.assertTrue(equal) + + def test_cut_relative(self): + print("MDF cut relative tests") + + configure(False, False) + + for mdfname in os.listdir('tmpdir_demo'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_demo', mdfname) + if '2.00' in input_file: + continue + + outfile1 = MDF(input_file, memory=memory).cut(stop=3, whence=1).save('tmp1', overwrite=True) + outfile2 = MDF(input_file, memory=memory).cut(start=3, stop=5, whence=1).save('tmp2', overwrite=True) + outfile3 = MDF(input_file, memory=memory).cut(start=5, whence=1).save('tmp3', overwrite=True) + + outfile = MDF.merge( + [outfile1, outfile2, outfile3], + MDF(input_file, memory='minimum').version, + ).save('tmp', overwrite=True) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for i, group in enumerate(mdf.groups): + for j, _ in enumerate(group['channels'][1:], 1): + original = mdf.get(group=i, index=j) + converted = mdf2.get(group=i, index=j) + if not np.array_equal( + original.samples, + converted.samples): + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = False + + self.assertTrue(equal) + + configure(True, True) + + for mdfname in os.listdir('tmpdir_demo'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_demo', mdfname) + + outfile1 = MDF(input_file, memory=memory).cut(stop=3, whence=1).save('tmp1', overwrite=True) + outfile2 = MDF(input_file, memory=memory).cut(start=3, stop=5, whence=1).save('tmp2', overwrite=True) + outfile3 = MDF(input_file, memory=memory).cut(start=5, whence=1).save('tmp3', overwrite=True) + + outfile = MDF.merge( + [outfile1, outfile2, outfile3], + MDF(input_file, memory='minimum').version, + ).save('tmp', overwrite=True) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for name in set(mdf.channels_db) - {'time', 't'}: + original = mdf.get(name) + converted = mdf2.get(name) + if not np.array_equal( + original.samples, + converted.samples): + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = False + + self.assertTrue(equal) + + def test_cut_relative_array(self): + print("MDF cut relative array tests") + + configure(False, False) + + for mdfname in os.listdir('tmpdir_array'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_array', mdfname) + + outfile1 = MDF(input_file, memory=memory).cut(stop=3.1, whence=1).save('tmp1', overwrite=True) + outfile2 = MDF(input_file, memory=memory).cut(start=3.1, stop=5.1, whence=1).save('tmp2', overwrite=True) + outfile3 = MDF(input_file, memory=memory).cut(start=5.1, whence=1).save('tmp3', overwrite=True) + + outfile = MDF.merge( + [outfile1, outfile2, outfile3], + MDF(input_file, memory='minimum').version, + ).save('tmp', overwrite=True) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for i, group in enumerate(mdf.groups): + for j, _ in enumerate(group['channels'][1:], 1): + original = mdf.get(group=i, index=j) + converted = mdf2.get(group=i, index=j) + if not np.array_equal( + original.samples, + converted.samples): + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = 
False + + self.assertTrue(equal) + + configure(True, True) + + for mdfname in os.listdir('tmpdir_array'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_array', mdfname) + + outfile1 = MDF(input_file, memory=memory).cut(stop=3.1, whence=1).save('tmp1', overwrite=True) + outfile2 = MDF(input_file, memory=memory).cut(start=3.1, stop=5.1, whence=1).save('tmp2', overwrite=True) + outfile3 = MDF(input_file, memory=memory).cut(start=5.1, whence=1).save('tmp3', overwrite=True) + + outfile = MDF.merge( + [outfile1, outfile2, outfile3], + MDF(input_file, memory='minimum').version, + ).save('tmp', overwrite=True) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(outfile, memory=memory) as mdf2: + + for name in set(mdf.channels_db) - {'time', 't'}: + original = mdf.get(name) + converted = mdf2.get(name) + if not np.array_equal( + original.samples, + converted.samples): + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = False + + self.assertTrue(equal) + + def test_filter(self): + print("MDF filter tests") + + for enable in (True, False): + configure(enable, enable) + + for mdfname in os.listdir('tmpdir_demo'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_demo', mdfname) + + if MDF(input_file, memory=memory).version == '2.00': + continue + + channels_nr = np.random.randint(1, len(CHANNELS_DEMO) + 1) + + channel_list = random.sample(list(CHANNELS_DEMO), channels_nr) + + filtered_mdf = MDF(input_file, memory=memory).filter(channel_list, memory=memory) + + self.assertTrue((set(filtered_mdf.channels_db) - {'t', 'time'}) == set(channel_list)) + + equal = True + + with MDF(input_file, memory=memory) as mdf: + + for name in channel_list: + original = mdf.get(name) + filtered = filtered_mdf.get(name) + if not np.array_equal( + original.samples, + filtered.samples): + equal = False + if not np.array_equal( + original.timestamps, + filtered.timestamps): + equal = False + + self.assertTrue(equal) + + def test_filter_array(self): + print("MDF filter array tests") + + for enable in (True, False): + configure(enable, enable) + + for mdfname in os.listdir('tmpdir_array'): + for memory in MEMORY[:1]: + input_file = os.path.join('tmpdir_array', mdfname) + + channels_nr = np.random.randint(1, len(CHANNELS_ARRAY) + 1) + + channel_list = random.sample(list(CHANNELS_ARRAY), channels_nr) + + filtered_mdf = MDF(input_file, memory=memory).filter(channel_list, memory=memory) + + filtered_mdf.save('filtered.mf4', overwrite=True) + + target = set(channel_list) + if 'Int16Array' in target: + target = target - {'XAxis', 'YAxis'} + if 'Maths' in target: + target = target - {'Saw', 'Ones', 'Cos', 'Sin', 'Zeros'} + if 'Composed' in target: + target = target - {'Int32', 'Float64', 'Uint8', 'Uint64'} + + actual = set(filtered_mdf.channels_db) - {'t', 'time'} + + if 'Int16Array' in actual: + actual = actual - {'XAxis', 'YAxis'} + if 'Maths' in actual: + actual = actual - {'Saw', 'Ones', 'Cos', 'Sin', 'Zeros'} + if 'Composed' in actual: + actual = actual - {'Int32', 'Float64', 'Uint8', 'Uint64'} + + self.assertTrue(actual == target) + + equal = True + + with MDF(input_file, memory=memory) as mdf: + + for name in channel_list: + original = mdf.get(name) + filtered = filtered_mdf.get(name) + if not np.array_equal( + original.samples, + filtered.samples): + equal = False + if not np.array_equal( + original.timestamps, + filtered.timestamps): + equal = False + + self.assertTrue(equal) + + def test_save(self): + print("MDF save tests") + 
compressions = [0, 1, 2] + split_sizes = [260, 10**5] + split_enables = [True, False] + overwrite_enables = [True, False] + for compression, memory, size, split_enable, overwrite in product(compressions, MEMORY, split_sizes, split_enables, overwrite_enables): + configure( + integer_compacting=False, + split_data_blocks=split_enable, + split_threshold=size, + overwrite=overwrite, + ) + + for mdfname in os.listdir('tmpdir_demo'): + input_file = os.path.join('tmpdir_demo', mdfname) + if MDF(input_file).version == '2.00': + continue + print(input_file, compression, memory, size, split_enable, overwrite) + with MDF(input_file, memory=memory) as mdf: + out_file = mdf.save('tmp', compression=compression) + print(out_file) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(out_file, memory=memory) as mdf2: + + for name in set(mdf.channels_db) - {'t', 'time'}: + original = mdf.get(name) + converted = mdf2.get(name) + if not np.array_equal( + original.samples, + converted.samples): + print(name, original, converted) + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = False + + self.assertTrue(equal) + + def test_save_array(self): + print("MDF save array tests") + + compressions = [0, 1, 2] + split_sizes = [260, 10**5] + split_enables = [True, False] + overwrite_enables = [True, False] + for compression, memory, size, split_enable, overwrite in product(compressions, MEMORY, split_sizes, split_enables, overwrite_enables): + configure( + integer_compacting=False, + split_data_blocks=split_enable, + split_threshold=size, + overwrite=overwrite, + ) + + for mdfname in os.listdir('tmpdir_array'): + input_file = os.path.join('tmpdir_array', mdfname) + print(input_file, compression, memory, size, split_enable, overwrite) + with MDF(input_file, memory=memory) as mdf: + out_file = mdf.save('tmp', compression=compression) + + equal = True + + with MDF(input_file, memory=memory) as mdf, \ + MDF(out_file, memory=memory) as mdf2: + + for name in set(mdf.channels_db) - {'t', 'time'}: + original = mdf.get(name) + converted = mdf2.get(name) + if not np.array_equal( + original.samples, + converted.samples): + print(name, original, converted) + equal = False + if not np.array_equal( + original.timestamps, + converted.timestamps): + equal = False + + self.assertTrue(equal) + + def test_select(self): + print("MDF select tests") + + for enable in (True, False): + configure(enable, enable) + + for mdfname in os.listdir('tmpdir_demo'): + for memory in MEMORY: + input_file = os.path.join('tmpdir_demo', mdfname) + + if MDF(input_file).version == '2.00': + continue + + channels_nr = np.random.randint(1, len(CHANNELS_DEMO) + 1) + + channel_list = random.sample(list(CHANNELS_DEMO), channels_nr) + + selected_signals = MDF(input_file, memory=memory).select(channel_list) + + self.assertTrue(len(selected_signals) == len(channel_list)) + + self.assertTrue(all(ch.name == name for ch, name in zip(selected_signals, channel_list))) + + equal = True + + with MDF(input_file, memory=memory) as mdf: + + for selected in selected_signals: + original = mdf.get(selected.name) + if not np.array_equal( + original.samples, + selected.samples): + equal = False + if not np.array_equal( + original.timestamps, + selected.timestamps): + equal = False + + self.assertTrue(equal) + + def test_select_array(self): + print("MDF select array tests") + + for enable in (True, False): + configure(enable, enable) + + for mdfname in os.listdir('tmpdir_array'): + for memory in MEMORY: + input_file 
= os.path.join('tmpdir_array', mdfname) + + channels_nr = np.random.randint(1, len(CHANNELS_ARRAY) + 1) + + channel_list = random.sample(list(CHANNELS_ARRAY), channels_nr) + + selected_signals = MDF(input_file, memory=memory).select(channel_list) + + self.assertTrue(len(selected_signals) == len(channel_list)) + + self.assertTrue(all(ch.name == name for ch, name in zip(selected_signals, channel_list))) + + equal = True + + with MDF(input_file, memory=memory) as mdf: + + for selected in selected_signals: + original = mdf.get(selected.name) + if not np.array_equal( + original.samples, + selected.samples): + equal = False + if not np.array_equal( + original.timestamps, + selected.timestamps): + equal = False + + self.assertTrue(equal) + if __name__ == '__main__': unittest.main() diff --git a/test/test_mdf2.py b/test/test_mdf2.py deleted file mode 100644 index c71b3d2a5..000000000 --- a/test/test_mdf2.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -import os -import unittest - -import numpy as np - -from utils import MEMORY -from asammdf import MDF, MDF2, Signal - -CHANNEL_LEN = 100000 - - -class TestMDF2(unittest.TestCase): - - def test_measurement(self): - self.assertTrue(MDF2) - - def test_read_mdf2_00(self): - - seed = np.random.randint(0, 2**31) - - np.random.seed(seed) - print('Read 2.00 using seed =', seed) - - sig_int = Signal( - np.random.randint(-2**31, 2**31, CHANNEL_LEN), - np.arange(CHANNEL_LEN), - name='Integer Channel', - unit='unit1', - ) - - sig_float = Signal( - np.random.random(CHANNEL_LEN), - np.arange(CHANNEL_LEN), - name='Float Channel', - unit='unit2', - ) - - for memory in MEMORY: - - with MDF(version='2.00', memory=memory) as mdf: - mdf.append([sig_int, sig_float], common_timebase=True) - mdf.save('tmp', overwrite=True) - - with MDF('tmp', memory=memory) as mdf: - ret_sig_int = mdf.get(sig_int.name) - ret_sig_float = mdf.get(sig_float.name) - - self.assertTrue(np.array_equal(ret_sig_int.samples, - sig_int.samples)) - self.assertTrue(np.array_equal(ret_sig_float.samples, - sig_float.samples)) - - os.remove('tmp') - - def test_read_mdf2_14(self): - - seed = np.random.randint(0, 2**31) - - np.random.seed(seed) - print('Read 2.14 using seed =', seed) - - sig_int = Signal( - np.random.randint(-2**31, 2**31, CHANNEL_LEN), - np.arange(CHANNEL_LEN), - name='Integer Channel', - unit='unit1', - ) - - sig_float = Signal( - np.random.random(CHANNEL_LEN), - np.arange(CHANNEL_LEN), - name='Float Channel', - unit='unit2', - ) - - for memory in MEMORY: - with MDF(version='2.14', memory=memory) as mdf: - mdf.append([sig_int, sig_float], common_timebase=True) - mdf.save('tmp', overwrite=True) - - with MDF('tmp', memory=memory) as mdf: - ret_sig_int = mdf.get(sig_int.name) - ret_sig_float = mdf.get(sig_float.name) - - self.assertTrue(np.array_equal(ret_sig_int.samples, - sig_int.samples)) - self.assertTrue(np.array_equal(ret_sig_float.samples, - sig_float.samples)) - - os.remove('tmp') - - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_mdf23.py b/test/test_mdf23.py new file mode 100644 index 000000000..84b783c0d --- /dev/null +++ b/test/test_mdf23.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python +from __future__ import print_function +import unittest + +import numpy as np + +from utils import MEMORY +from asammdf import MDF, MDF2, MDF3, Signal, configure + +CHANNEL_LEN = 10000 + +configure(integer_compacting=True, + split_data_blocks=True, + split_threshold=260, + overwrite=True) + + +class TestMDF23(unittest.TestCase): + + 
def test_measurement(self): + self.assertTrue(MDF2) + self.assertTrue(MDF3) + + def test_read_mdf2_00(self): + + seed = np.random.randint(0, 2**31) + + np.random.seed(seed) + print('Read 2.00 using seed =', seed) + + sig_int = Signal( + np.random.randint(-2**15, -1, CHANNEL_LEN), + np.arange(CHANNEL_LEN), + name='Integer Channel', + unit='unit1', + ) + + sig_float = Signal( + np.random.random(CHANNEL_LEN), + np.arange(CHANNEL_LEN), + name='Float Channel', + unit='unit2', + ) + + for memory in MEMORY: + print(memory) + + with MDF(version='2.00', memory=memory) as mdf: + mdf.append([sig_int, sig_float], common_timebase=True) + outfile = mdf.save('tmp', overwrite=True) + + with MDF(outfile, memory=memory) as mdf: + ret_sig_int = mdf.get(sig_int.name) + ret_sig_float = mdf.get(sig_float.name) + + self.assertTrue(np.array_equal(ret_sig_int.samples, + sig_int.samples)) + self.assertTrue(np.array_equal(ret_sig_float.samples, + sig_float.samples)) + + def test_read_mdf2_14(self): + + seed = np.random.randint(0, 2**31) + + np.random.seed(seed) + print('Read 2.14 using seed =', seed) + + sig_int = Signal( + np.random.randint(-2**29, 2**29, CHANNEL_LEN), + np.arange(CHANNEL_LEN), + name='Integer Channel', + unit='unit1', + ) + + sig_float = Signal( + np.random.random(CHANNEL_LEN), + np.arange(CHANNEL_LEN), + name='Float Channel', + unit='unit2', + ) + + for memory in MEMORY: + print(memory) + with MDF(version='2.14', memory=memory) as mdf: + mdf.append([sig_int, sig_float], common_timebase=True) + outfile = mdf.save('tmp', overwrite=True) + + with MDF(outfile, memory=memory) as mdf: + ret_sig_int = mdf.get(sig_int.name) + ret_sig_float = mdf.get(sig_float.name) + + self.assertTrue(np.array_equal(ret_sig_int.samples, + sig_int.samples)) + self.assertTrue(np.array_equal(ret_sig_float.samples, + sig_float.samples)) + + def test_read_mdf3_00(self): + + seed = np.random.randint(0, 2**31) + + np.random.seed(seed) + print('Read 3.00 using seed =', seed) + + sig_int = Signal( + np.random.randint(-2**16, 2**16, CHANNEL_LEN, np.int32), + np.arange(CHANNEL_LEN), + name='Integer Channel', + unit='unit1', + ) + + sig_float = Signal( + np.random.random(CHANNEL_LEN), + np.arange(CHANNEL_LEN), + name='Float Channel', + unit='unit2', + ) + + for memory in MEMORY: + print(memory) + + with MDF(version='3.00', memory=memory) as mdf: + mdf.append([sig_int, sig_float], common_timebase=True) + outfile = mdf.save('tmp', overwrite=True) + + with MDF(outfile, memory=memory) as mdf: + ret_sig_int = mdf.get(sig_int.name) + ret_sig_float = mdf.get(sig_float.name) + + self.assertTrue(np.array_equal(ret_sig_int.samples, + sig_int.samples)) + self.assertTrue(np.array_equal(ret_sig_float.samples, + sig_float.samples)) + + def test_read_mdf3_10(self): + + seed = np.random.randint(0, 2**31) + + np.random.seed(seed) + print('Read 3.10 using seed =', seed) + + sig_int = Signal( + np.random.randint(-2**9, 2**7, CHANNEL_LEN), + np.arange(CHANNEL_LEN), + name='Integer Channel', + unit='unit1', + ) + + sig_float = Signal( + np.random.random(CHANNEL_LEN), + np.arange(CHANNEL_LEN), + name='Float Channel', + unit='unit2', + ) + + for memory in MEMORY: + with MDF(version='3.10', memory=memory) as mdf: + mdf.append([sig_int, sig_float], common_timebase=True) + outfile = mdf.save('tmp', overwrite=True) + + print(memory) + + with MDF(outfile, memory=memory) as mdf: + ret_sig_int = mdf.get(sig_int.name) + ret_sig_float = mdf.get(sig_float.name) + + self.assertTrue(np.array_equal(ret_sig_int.samples, + sig_int.samples)) + 
self.assertTrue(np.array_equal(ret_sig_float.samples, + sig_float.samples)) + + +if __name__ == '__main__': + unittest.main() diff --git a/test/test_mdf3.py b/test/test_mdf3.py deleted file mode 100644 index c70ca749c..000000000 --- a/test/test_mdf3.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python -from __future__ import print_function -import os -import unittest - -import numpy as np - -from utils import MEMORY -from asammdf import MDF, MDF3, Signal - -CHANNEL_LEN = 100000 - - -class TestMDF3(unittest.TestCase): - - def test_measurement(self): - self.assertTrue(MDF3) - - def test_read_mdf3_00(self): - - seed = np.random.randint(0, 2**31) - - np.random.seed(seed) - print('Read 2.00 using seed =', seed) - - sig_int = Signal( - np.random.randint(-2**31, 2**31, CHANNEL_LEN), - np.arange(CHANNEL_LEN), - name='Integer Channel', - unit='unit1', - ) - - sig_float = Signal( - np.random.random(CHANNEL_LEN), - np.arange(CHANNEL_LEN), - name='Float Channel', - unit='unit2', - ) - - for memory in MEMORY: - - with MDF(version='3.00', memory=memory) as mdf: - mdf.append([sig_int, sig_float], common_timebase=True) - mdf.save('tmp', overwrite=True) - - with MDF('tmp', memory=memory) as mdf: - ret_sig_int = mdf.get(sig_int.name) - ret_sig_float = mdf.get(sig_float.name) - - self.assertTrue(np.array_equal(ret_sig_int.samples, - sig_int.samples)) - self.assertTrue(np.array_equal(ret_sig_float.samples, - sig_float.samples)) - - os.remove('tmp') - - def test_read_mdf3_10(self): - - seed = np.random.randint(0, 2**31) - - np.random.seed(seed) - print('Read 3.10 using seed =', seed) - - sig_int = Signal( - np.random.randint(-2**31, 2**31, CHANNEL_LEN), - np.arange(CHANNEL_LEN), - name='Integer Channel', - unit='unit1', - ) - - sig_float = Signal( - np.random.random(CHANNEL_LEN), - np.arange(CHANNEL_LEN), - name='Float Channel', - unit='unit2', - ) - - for memory in MEMORY: - with MDF(version='3.10', memory=memory) as mdf: - mdf.append([sig_int, sig_float], common_timebase=True) - mdf.save('tmp', overwrite=True) - - with MDF('tmp', memory=memory) as mdf: - ret_sig_int = mdf.get(sig_int.name) - ret_sig_float = mdf.get(sig_float.name) - - self.assertTrue(np.array_equal(ret_sig_int.samples, - sig_int.samples)) - self.assertTrue(np.array_equal(ret_sig_float.samples, - sig_float.samples)) - - os.remove('tmp') - - -if __name__ == '__main__': - unittest.main() diff --git a/test/test_mdf4.py b/test/test_mdf4.py index a0d7f7430..40c0b75e7 100644 --- a/test/test_mdf4.py +++ b/test/test_mdf4.py @@ -1,6 +1,5 @@ #!/usr/bin/env python from __future__ import print_function -import os import unittest import numpy as np @@ -41,9 +40,9 @@ def test_read_mdf4_00(self): with MDF(version='4.00', memory=memory) as mdf: mdf.append([sig_int, sig_float], common_timebase=True) - mdf.save('tmp', overwrite=True) + outfile = mdf.save('tmp', overwrite=True) - with MDF('tmp', memory=memory) as mdf: + with MDF(outfile, memory=memory) as mdf: ret_sig_int = mdf.get(sig_int.name) ret_sig_float = mdf.get(sig_float.name) @@ -52,8 +51,6 @@ def test_read_mdf4_00(self): self.assertTrue(np.array_equal(ret_sig_float.samples, sig_float.samples)) - os.remove('tmp') - def test_read_mdf4_10(self): seed = np.random.randint(0, 2**31) @@ -78,9 +75,9 @@ def test_read_mdf4_10(self): for memory in MEMORY: with MDF(version='4.10', memory=memory) as mdf: mdf.append([sig_int, sig_float], common_timebase=True) - mdf.save('tmp', overwrite=True) + outfile = mdf.save('tmp', overwrite=True) - with MDF('tmp', memory=memory) as mdf: + with MDF(outfile, memory=memory) 
as mdf: ret_sig_int = mdf.get(sig_int.name) ret_sig_float = mdf.get(sig_float.name) @@ -89,8 +86,6 @@ def test_read_mdf4_10(self): self.assertTrue(np.array_equal(ret_sig_float.samples, sig_float.samples)) - os.remove('tmp') - if __name__ == '__main__': unittest.main() diff --git a/test/utils.py b/test/utils.py index 687247d1f..6f87b29d5 100644 --- a/test/utils.py +++ b/test/utils.py @@ -1,10 +1,1651 @@ # -*- coding: utf-8 -*- import os +import numpy as np MEMORY = ('minimum', 'low', 'full') +CHANNELS_DEMO = { + +'$ActiveCalibrationPage': np.array([1, 0, 1, 1], dtype=np.uint8), +'$CalibrationLog': np.array([b'', b'Switch to reference page', b'Switch to working page', + b'Switch to working page'], + dtype='|S24'), +'ASAM.M.SCALAR.UBYTE.TAB_NOINTP_DEFAULT_VALUE': np.array([ 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 104. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 108. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 102. , + 110.66666412, 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. ], dtype=np.float32), +'ASAM_[0].M.MATRIX_DIM_16_1_1.UBYTE.IDENTICAL': np.array([ 88, 98, 108, 118, 128, 138, 148, 158, 168, 178, 188, 198, 208, + 218, 228, 238, 248, 2, 12, 22, 32, 42, 52, 62, 72, 82, + 92, 102, 112, 122, 132, 142, 152, 162, 172, 182, 192, 202, 212, + 222, 0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, + 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, + 250, 4, 14, 24, 34, 44, 54, 64, 74, 84, 94, 104, 114, + 124, 134, 144, 154, 164, 174, 184, 194, 204, 214, 224, 234], dtype=np.uint8), +'ASAM.M.SCALAR.SWORD.IDENTICAL': np.array([900, 910, 920, 930, 940, 950, 960, 970, 980, 990, 0, 10, 20, + 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, + 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, 280, + 290, 300, 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, 410, + 420, 430, 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540, + 550, 560, 570, 580, 590, 600, 610, 620, 630, 640, 650, 660, 670, + 680, 690, 700, 710, 720, 730, 740, 750, 760, 770, 780, 790], dtype=np.int16), +'ASAM.M.SCALAR.UBYTE.TAB_INTP_DEFAULT_VALUE': np.array([ 111., 111., 111., 111., 111., 111., 111., 111., 111., + 111., 111., 111., 111., 111., 111., 111., 111., 111., + 111., 111., 100., 110., 111., 111., 111., 111., 111., + 111., 111., 111., 111., 111., 111., 111., 111., 111., + 111., 111., 111., 111., 111., 111., 111., 111., 111., + 111., 104., 111., 111., 111., 111., 111., 111., 111., + 111., 111., 111., 111., 111., 111., 111., 111., 111., + 111., 111., 111., 111., 111., 111., 111., 111., 111., + 108., 111., 111., 111., 111., 111., 111., 111., 111., + 111., 111., 111., 111., 111., 111., 111., 111., 111.], dtype=np.float32), +'ASAM.M.SCALAR.UBYTE.FORM_X_PLUS_4': np.array([ 4., 14., 24., 34., 44., 54., 64., 74., 84., + 94., 104., 114., 124., 134., 144., 154., 164., 174., + 184., 194., 204., 214., 224., 234., 244., 254., 8., + 18., 28., 38., 48., 58., 68., 78., 88., 98., + 108., 118., 128., 138., 148., 158., 168., 178., 188., + 198., 208., 218., 228., 238., 248., 258., 12., 22., + 32., 42., 52., 62., 72., 82., 92., 102., 112., + 122., 
132., 142., 152., 162., 172., 182., 192., 202., + 212., 222., 232., 242., 252., 6., 16., 26., 36., + 46., 56., 66., 76., 86., 96., 106., 116., 126.], dtype=np.float32), +'ASAM.M.SCALAR.SLONG.IDENTICAL': np.array([200, 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, 310, 320, + 330, 340, 350, 360, 370, 380, 390, 400, 410, 420, 430, 440, 450, + 460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570, 580, + 590, 600, 610, 620, 630, 640, 650, 660, 670, 680, 690, 700, 710, + 720, 730, 740, 750, 760, 770, 780, 790, 800, 810, 820, 830, 840, + 850, 860, 870, 880, 890, 900, 910, 920, 930, 940, 950, 960, 970, + 980, 990, 0, 10, 20, 30, 40, 50, 60, 70, 80, 90]), +'ASAM.M.SCALAR.UBYTE.IDENTICAL': np.array([188, 198, 208, 218, 228, 238, 248, 2, 12, 22, 32, 42, 52, + 62, 72, 82, 92, 102, 112, 122, 132, 142, 152, 162, 172, 182, + 192, 202, 212, 222, 0, 10, 20, 30, 40, 50, 60, 70, 80, + 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, + 220, 230, 240, 250, 4, 14, 24, 34, 44, 54, 64, 74, 84, + 94, 104, 114, 124, 134, 144, 154, 164, 174, 184, 194, 204, 214, + 224, 234, 244, 254, 8, 18, 28, 38, 48, 58, 68, 78], dtype=np.uint8), +'ASAM.M.SCALAR.UBYTE.TAB_INTP_NO_DEFAULT_VALUE': np.array([ 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 108. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 102. , 110.66666412, 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 100. , 110. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 104. , 111. , + 111. , 111. 
], dtype=np.float32), +'ASAM.M.SCALAR.SBYTE.LINEAR_MUL_2': np.array([-136., -116., -96., -76., -56., -36., -16., 4., 24., + 44., 64., 84., 104., 124., 144., 164., 184., 204., + 224., 244., -248., -228., -208., -188., -168., -148., -128., + -108., -88., -68., 0., 20., 40., 60., 80., 100., + 120., 140., 160., 180., 200., 220., 240., -252., -232., + -212., -192., -172., -152., -132., -112., -92., -72., -52., + -32., -12., 8., 28., 48., 68., 88., 108., 128., + 148., 168., 188., 208., 228., 248., -244., -224., -204., + -184., -164., -144., -124., -104., -84., -64., -44., -24., + -4., 16., 36., 56., 76., 96., 116., 136., 156.], dtype=np.float32), +'ASAM_[0].M.ARRAY_SIZE_16.UBYTE.IDENTICAL': np.array([ 44, 54, 64, 74, 84, 94, 104, 114, 124, 134, 144, 154, 164, + 174, 184, 194, 204, 214, 224, 234, 244, 254, 8, 18, 28, 38, + 48, 58, 68, 78, 88, 98, 108, 118, 128, 138, 148, 158, 168, + 178, 188, 198, 208, 218, 228, 238, 248, 2, 12, 22, 32, 42, + 52, 62, 72, 82, 92, 102, 112, 122, 132, 142, 152, 162, 172, + 182, 192, 202, 212, 222, 0, 10, 20, 30, 40, 50, 60, 70, + 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190], dtype=np.uint8), +'ASAM_[0][0].M.MATRIX_DIM_8_2_1.UBYTE.IDENTICAL': np.array([244, 254, 8, 18, 28, 38, 48, 58, 68, 78, 88, 98, 108, + 118, 128, 138, 148, 158, 168, 178, 188, 198, 208, 218, 228, 238, + 248, 2, 12, 22, 32, 42, 52, 62, 72, 82, 92, 102, 112, + 122, 132, 142, 152, 162, 172, 182, 192, 202, 212, 222, 0, 10, + 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, + 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 4, 14, + 24, 34, 44, 54, 64, 74, 84, 94, 104, 114, 124, 134], dtype=np.uint8), +'ASAM.M.SCALAR.UWORD.IDENTICAL.BITMASK_0FF0': np.array([61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 62, 62, 62, + 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, + 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, + 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, + 62, 62, 62, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, + 61, 61, 61, 61, 61, 61, 61, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 60, 60, 60, 60, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, + 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 57, 57, 57, 57, 57, 57, 57, + 57, 57, 56, 56, 56, 56, 56, 56, 56, 56, 56, 55, 55, 55, 55, 55, 55, + 55, 55, 54, 54, 54, 54, 54, 54, 54, 54, 53, 53, 53, 53, 53, 53, 53, + 52, 52, 52, 52, 52, 52, 52, 51, 51, 51, 51, 51, 51, 51, 50, 50, 50, + 50, 50, 50, 49, 49, 49, 49, 49, 49, 48, 48, 48, 48, 48, 48, 47, 47, + 47, 47, 47, 47, 46, 46, 46, 46, 46, 46, 45, 45, 45, 45, 45, 45, 44, + 44, 44, 44, 44, 44, 43, 43, 43, 43, 43, 42, 42, 42, 42, 42, 42, 41, + 41, 41, 41, 41, 40, 40, 40, 40, 40, 39, 39, 39, 39, 39, 39, 38, 38, + 38, 38, 38, 37, 37, 37, 37, 37, 36, 36, 36, 36, 36, 35, 35, 35, 35, + 35, 34, 34, 34, 34, 34, 33, 33, 33, 33, 33, 33, 32, 32, 32, 32, 32, + 31, 31, 31, 31, 31, 30, 30, 30, 30, 30, 29, 29, 29, 29, 29, 28, 28, + 28, 28, 28, 27, 27, 27, 27, 27, 26, 26, 26, 26, 26, 25, 25, 25, 25, + 25, 25, 24, 24, 24, 24, 24, 23, 23, 23, 23, 23, 22, 22, 22, 22, 22, + 21, 21, 21, 21, 21, 21, 20, 20, 20, 20, 20, 19, 19, 19, 19, 19, 19, + 18, 18, 18, 18, 18, 17, 17, 17, 17, 17, 17, 16, 16, 16, 16, 16, 16, + 15, 15, 15, 15, 15, 15, 14, 14, 14, 14, 14, 14, 13, 13, 13, 13, 13, + 13, 12, 12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11, 11, 10, 10, 10, + 10, 10, 10, 10, 9, 9, 9, 9, 9, 9, 9, 8, 8, 8, 8, 8, 8, + 8, 7, 7, 7, 7, 7, 7, 7, 7, 6, 6, 6, 6, 6, 6, 6, 6, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, + 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, + 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, + 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, + 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, + 13, 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16, 16, + 16, 16, 16, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 19, 19, 19, + 19, 19, 19, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 22, 22, 22, + 22, 22, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, + 25, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 29, + 29, 29, 29, 29, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 32, 32, 32, + 32, 32, 33, 33, 33, 33, 33, 33, 34, 34, 34, 34, 34, 35, 35, 35, 35, + 35, 36, 36, 36, 36, 36, 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, 39, + 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, 42, 42, + 42, 42, 42, 42, 43, 43, 43, 43, 43, 44, 44, 44, 44, 44, 44, 45, 45, + 45, 45, 45, 45, 46, 46, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 48, + 48, 48, 48, 48, 48, 49, 49, 49, 49, 49, 49, 50], dtype=np.uint16), +'ASAM.M.SCALAR.UBYTE.VTAB_RANGE_DEFAULT_VALUE': np.array([ 13, 16, 19, 22, 25, 28, 31, 35, 38, 41, 44, 47, 50, + 53, 56, 60, 63, 66, 69, 72, 75, 78, 81, 84, 87, 90, + 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127, 130, + 133, 136, 139, 142, 145, 148, 151, 154, 157, 160, 163, 166, 169, + 172, 174, 177, 180, 183, 186, 189, 192, 195, 198, 200, 203, 206, + 209, 212, 214, 217, 220, 223, 226, 228, 231, 234, 237, 239, 242, + 245, 247, 250, 253, 255, 2, 5, 7, 10, 13, 15, 18, 20, + 23, 25, 28, 30, 33, 35, 38, 40, 43, 45, 48, 50, 53, + 55, 57, 60, 62, 65, 67, 69, 71, 74, 76, 78, 81, 83, + 85, 87, 89, 92, 94, 96, 98, 100, 102, 104, 107, 109, 111, + 113, 115, 117, 119, 121, 123, 125, 127, 128, 130, 132, 134, 136, + 138, 140, 141, 143, 145, 147, 149, 150, 152, 154, 155, 157, 159, + 160, 162, 163, 165, 167, 168, 170, 171, 173, 174, 176, 177, 178, + 180, 181, 183, 184, 185, 187, 188, 189, 190, 192, 193, 194, 195, + 196, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 213, 214, 215, 216, 217, 217, 218, 219, 219, + 220, 221, 221, 222, 223, 223, 224, 224, 225, 225, 226, 226, 227, + 227, 228, 228, 228, 229, 229, 229, 230, 230, 230, 230, 231, 231, + 231, 231, 231, 231, 231, 231, 231, 231, 232, 231, 231, 231, 231, + 231, 231, 231, 231, 231, 231, 230, 230, 230, 230, 229, 229, 229, + 228, 228, 228, 227, 227, 226, 226, 225, 225, 224, 224, 223, 223, + 222, 221, 221, 220, 219, 219, 218, 217, 217, 216, 215, 214, 213, + 213, 212, 211, 210, 209, 208, 207, 206, 205, 204, 203, 202, 201, + 200, 199, 198, 196, 195, 194, 193, 192, 190, 189, 188, 187, 185, + 184, 183, 181, 180, 178, 177, 176, 174, 173, 171, 170, 168, 167, + 165, 163, 162, 160, 159, 157, 155, 154, 152, 150, 149, 147, 145, + 143, 141, 140, 138, 136, 134, 132, 130, 128, 127, 125, 123, 121, + 119, 117, 115, 113, 111, 109, 107, 104, 102, 100, 98, 96, 94, + 92, 89, 87, 85, 83, 81, 78, 76, 74, 71, 69, 67, 65, + 62, 60, 57, 55, 53, 50, 48, 45, 43, 40, 38, 35, 
33, + 30, 28, 25, 23, 20, 18, 15, 13, 10, 7, 5, 2, 255, + 253, 250, 247, 245, 242, 239, 237, 234, 231, 228, 226, 223, 220, + 217, 214, 212, 209, 206, 203, 200, 198, 195, 192, 189, 186, 183, + 180, 177, 174, 172, 169, 166, 163, 160, 157, 154, 151, 148, 145, + 142, 139, 136, 133, 130, 127, 124, 121, 118, 115, 112, 109, 106, + 103, 100, 97, 94, 90, 87, 84, 81, 78, 75, 72, 69, 66, + 63, 60, 56, 53, 50, 47, 44, 41, 38, 35, 31, 28, 25, + 22, 19, 16, 13, 9, 6, 3, 0, 253, 250, 247, 243, 240, + 237, 234, 231, 228, 225, 222, 218, 215, 212, 209, 206, 203, 200, + 196, 193, 190, 187, 184, 181, 178, 175, 171, 168, 165, 162, 159, + 156, 153, 150, 147, 144, 141, 137, 134, 131, 128, 125, 122, 119, + 116, 113, 110, 107, 104, 101, 98, 95, 92, 89, 86, 83, 80, + 77, 74, 71, 68, 65, 62, 59, 57, 54, 51, 48, 45, 42, + 39, 36, 33, 31, 28, 25, 22, 19, 17, 14, 11, 8, 5, + 3, 0, 253, 250, 248, 245, 242, 240, 237, 234, 232, 229, 226, + 224, 221, 218, 216, 213, 211, 208, 206, 203, 201, 198, 196, 193, + 191, 188, 186, 183, 181, 178, 176, 174, 171, 169, 166, 164, 162, + 160, 157, 155, 153, 150, 148, 146, 144, 142, 139, 137, 135, 133, + 131, 129, 127, 124, 122, 120, 118, 116, 114, 112, 110, 108, 106, + 104, 103, 101, 99, 97, 95, 93, 91, 90, 88, 86, 84, 82, + 81, 79, 77, 76, 74, 72, 71, 69, 68, 66, 64, 63, 61, + 60, 58, 57, 55, 54, 53, 51, 50, 48, 47, 46, 44, 43, + 42, 41, 39, 38, 37, 36, 35, 33, 32, 31, 30, 29, 28, + 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 18, 17, 16, + 15, 14, 14, 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, + 7, 6, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, + 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, + 6, 6, 7, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, + 14, 14, 15, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 35, 36, 37, 38, + 39, 41, 42, 43, 44, 46, 47, 48, 50, 51, 53, 54, 55, + 57, 58, 60, 61, 63, 64, 66, 68, 69, 71, 72, 74, 76, + 77, 79, 81, 82, 84, 86, 88, 90, 91, 93, 95, 97, 99, + 101, 103, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, + 127, 129, 131, 133, 135, 137, 139, 142, 144, 146, 148, 150, 153, + 155, 157, 160, 162, 164, 166, 169, 171, 174, 176, 178, 181, 183, + 186, 188, 191, 193, 196, 198, 201, 203, 206, 208, 211, 213], dtype=np.uint8), +'ASAM.M.SCALAR.FLOAT32.IDENTICAL': np.array([ 8.13845703e+02, 8.16285095e+02, 8.18711975e+02, + 8.21126343e+02, 8.23527954e+02, 8.25916870e+02, + 8.28292908e+02, 8.30655945e+02, 8.33005920e+02, + 8.35342773e+02, 8.37666382e+02, 8.39976685e+02, + 8.42273560e+02, 8.44556885e+02, 8.46826660e+02, + 8.49082703e+02, 8.51325012e+02, 8.53553406e+02, + 8.55767822e+02, 8.57968262e+02, 8.60154541e+02, + 8.62326538e+02, 8.64484314e+02, 8.66627686e+02, + 8.68756531e+02, 8.70870911e+02, 8.72970581e+02, + 8.75055542e+02, 8.77125671e+02, 8.79180969e+02, + 8.81221252e+02, 8.83246521e+02, 8.85256592e+02, + 8.87251526e+02, 8.89231140e+02, 8.91195435e+02, + 8.93144226e+02, 8.95077515e+02, 8.96995178e+02, + 8.98897217e+02, 9.00783508e+02, 9.02653931e+02, + 9.04508484e+02, 9.06347107e+02, 9.08169617e+02, + 9.09976074e+02, 9.11766296e+02, 9.13540283e+02, + 9.15297974e+02, 9.17039246e+02, 9.18764038e+02, + 9.20472290e+02, 9.22163940e+02, 9.23838989e+02, + 9.25497253e+02, 9.27138733e+02, 9.28763306e+02, + 9.30371033e+02, 9.31961731e+02, 9.33535339e+02, + 9.35091858e+02, 9.36631226e+02, 9.38153320e+02, + 9.39658142e+02, 9.41145630e+02, 9.42615662e+02, + 9.44068237e+02, 9.45503235e+02, 9.46920715e+02, + 9.48320496e+02, 9.49702637e+02, 9.51066956e+02, + 9.52413513e+02, 9.53742188e+02, 
9.55052979e+02, + 9.56345764e+02, 9.57620605e+02, 9.58877319e+02, + 9.60115906e+02, 9.61336365e+02, 9.62538574e+02, + 9.63722595e+02, 9.64888245e+02, 9.66035583e+02, + 9.67164490e+02, 9.68274963e+02, 9.69366943e+02, + 9.70440369e+02, 9.71495239e+02, 9.72531555e+02, + 9.73549133e+02, 9.74548096e+02, 9.75528259e+02, + 9.76489685e+02, 9.77432251e+02, 9.78356018e+02, + 9.79260864e+02, 9.80146851e+02, 9.81013855e+02, + 9.81861816e+02, 9.82690796e+02, 9.83500732e+02, + 9.84291565e+02, 9.85063293e+02, 9.85815857e+02, + 9.86549255e+02, 9.87263428e+02, 9.87958374e+02, + 9.88634033e+02, 9.89290466e+02, 9.89927551e+02, + 9.90545288e+02, 9.91143616e+02, 9.91722595e+02, + 9.92282166e+02, 9.92822327e+02, 9.93342957e+02, + 9.93844177e+02, 9.94325867e+02, 9.94788086e+02, + 9.95230713e+02, 9.95653809e+02, 9.96057373e+02, + 9.96441284e+02, 9.96805664e+02, 9.97150391e+02, + 9.97475525e+02, 9.97781006e+02, 9.98066833e+02, + 9.98332947e+02, 9.98579468e+02, 9.98806274e+02, + 9.99013367e+02, 9.99200745e+02, 9.99368469e+02, + 9.99516479e+02, 9.99644714e+02, 9.99753296e+02, + 9.99842102e+02, 9.99911194e+02, 9.99960510e+02, + 9.99990112e+02, 1.00000000e+03, 9.99990112e+02, + 9.99960510e+02, 9.99911194e+02, 9.99842102e+02, + 9.99753296e+02, 9.99644714e+02, 9.99516479e+02, + 9.99368469e+02, 9.99200745e+02, 9.99013367e+02, + 9.98806274e+02, 9.98579468e+02, 9.98332947e+02, + 9.98066833e+02, 9.97781006e+02, 9.97475525e+02, + 9.97150391e+02, 9.96805664e+02, 9.96441284e+02, + 9.96057373e+02, 9.95653809e+02, 9.95230713e+02, + 9.94788086e+02, 9.94325867e+02, 9.93844177e+02, + 9.93342957e+02, 9.92822327e+02, 9.92282166e+02, + 9.91722595e+02, 9.91143616e+02, 9.90545288e+02, + 9.89927551e+02, 9.89290466e+02, 9.88634033e+02, + 9.87958374e+02, 9.87263428e+02, 9.86549255e+02, + 9.85815857e+02, 9.85063293e+02, 9.84291565e+02, + 9.83500732e+02, 9.82690796e+02, 9.81861816e+02, + 9.81013855e+02, 9.80146851e+02, 9.79260864e+02, + 9.78356018e+02, 9.77432251e+02, 9.76489685e+02, + 9.75528259e+02, 9.74548096e+02, 9.73549133e+02, + 9.72531555e+02, 9.71495239e+02, 9.70440369e+02, + 9.69366943e+02, 9.68274963e+02, 9.67164490e+02, + 9.66035583e+02, 9.64888245e+02, 9.63722595e+02, + 9.62538574e+02, 9.61336365e+02, 9.60115906e+02, + 9.58877319e+02, 9.57620605e+02, 9.56345764e+02, + 9.55052979e+02, 9.53742188e+02, 9.52413513e+02, + 9.51066956e+02, 9.49702637e+02, 9.48320496e+02, + 9.46920715e+02, 9.45503235e+02, 9.44068237e+02, + 9.42615662e+02, 9.41145630e+02, 9.39658142e+02, + 9.38153320e+02, 9.36631226e+02, 9.35091858e+02, + 9.33535339e+02, 9.31961731e+02, 9.30371033e+02, + 9.28763306e+02, 9.27138733e+02, 9.25497253e+02, + 9.23838989e+02, 9.22163940e+02, 9.20472290e+02, + 9.18764038e+02, 9.17039246e+02, 9.15297974e+02, + 9.13540283e+02, 9.11766296e+02, 9.09976074e+02, + 9.08169617e+02, 9.06347107e+02, 9.04508484e+02, + 9.02653931e+02, 9.00783508e+02, 8.98897217e+02, + 8.96995178e+02, 8.95077515e+02, 8.93144226e+02, + 8.91195435e+02, 8.89231140e+02, 8.87251526e+02, + 8.85256592e+02, 8.83246521e+02, 8.81221252e+02, + 8.79180969e+02, 8.77125671e+02, 8.75055542e+02, + 8.72970581e+02, 8.70870911e+02, 8.68756531e+02, + 8.66627686e+02, 8.64484314e+02, 8.62326538e+02, + 8.60154541e+02, 8.57968262e+02, 8.55767822e+02, + 8.53553406e+02, 8.51325012e+02, 8.49082703e+02, + 8.46826660e+02, 8.44556885e+02, 8.42273560e+02, + 8.39976685e+02, 8.37666382e+02, 8.35342773e+02, + 8.33005920e+02, 8.30655945e+02, 8.28292908e+02, + 8.25916870e+02, 8.23527954e+02, 8.21126343e+02, + 8.18711975e+02, 8.16285095e+02, 8.13845703e+02, + 8.11393860e+02, 8.08929810e+02, 
8.06453552e+02, + 8.03965149e+02, 8.01464783e+02, 7.98952515e+02, + 7.96428406e+02, 7.93892639e+02, 7.91345215e+02, + 7.88786377e+02, 7.86216064e+02, 7.83634460e+02, + 7.81041687e+02, 7.78437805e+02, 7.75822937e+02, + 7.73197144e+02, 7.70560608e+02, 7.67913391e+02, + 7.65255615e+02, 7.62587341e+02, 7.59908691e+02, + 7.57219788e+02, 7.54520691e+02, 7.51811584e+02, + 7.49092529e+02, 7.46363647e+02, 7.43625061e+02, + 7.40876831e+02, 7.38119080e+02, 7.35351990e+02, + 7.32575562e+02, 7.29789917e+02, 7.26995239e+02, + 7.24191589e+02, 7.21379089e+02, 7.18557861e+02, + 7.15728027e+02, 7.12889648e+02, 7.10042847e+02, + 7.07187805e+02, 7.04324524e+02, 7.01453247e+02, + 6.98573975e+02, 6.95686829e+02, 6.92791992e+02, + 6.89889526e+02, 6.86979614e+02, 6.84062256e+02, + 6.81137695e+02, 6.78205933e+02, 6.75267151e+02, + 6.72321472e+02, 6.69368958e+02, 6.66409790e+02, + 6.63444031e+02, 6.60471802e+02, 6.57493286e+02, + 6.54508484e+02, 6.51517639e+02, 6.48520813e+02, + 6.45518066e+02, 6.42509644e+02, 6.39495544e+02, + 6.36475952e+02, 6.33450989e+02, 6.30420776e+02, + 6.27385376e+02, 6.24344971e+02, 6.21299622e+02, + 6.18249512e+02, 6.15194702e+02, 6.12135376e+02, + 6.09071594e+02, 6.06003540e+02, 6.02931274e+02, + 5.99854980e+02, 5.96774719e+02, 5.93690674e+02, + 5.90602905e+02, 5.87511536e+02, 5.84416748e+02, + 5.81318604e+02, 5.78217224e+02, 5.75112793e+02, + 5.72005371e+02, 5.68895142e+02, 5.65782166e+02, + 5.62666626e+02, 5.59548584e+02, 5.56428223e+02, + 5.53305603e+02, 5.50180847e+02, 5.47054138e+02, + 5.43925598e+02, 5.40795288e+02, 5.37663391e+02, + 5.34530029e+02, 5.31395264e+02, 5.28259277e+02, + 5.25122131e+02, 5.21984070e+02, 5.18845093e+02, + 5.15705383e+02, 5.12565063e+02, 5.09424225e+02, + 5.06283020e+02, 5.03141571e+02, 5.00000000e+02, + 4.96858429e+02, 4.93716980e+02, 4.90575775e+02, + 4.87434967e+02, 4.84294617e+02, 4.81154907e+02, + 4.78015930e+02, 4.74877838e+02, 4.71740723e+02, + 4.68604736e+02, 4.65470001e+02, 4.62336609e+02, + 4.59204681e+02, 4.56074402e+02, 4.52945831e+02, + 4.49819153e+02, 4.46694427e+02, 4.43571808e+02, + 4.40451416e+02, 4.37333374e+02, 4.34217834e+02, + 4.31104858e+02, 4.27994598e+02, 4.24887207e+02, + 4.21782776e+02, 4.18681427e+02, 4.15583282e+02, + 4.12488464e+02, 4.09397125e+02, 4.06309357e+02, + 4.03225281e+02, 4.00145020e+02, 3.97068695e+02, + 3.93996460e+02, 3.90928375e+02, 3.87864624e+02, + 3.84805298e+02, 3.81750488e+02, 3.78700378e+02, + 3.75655060e+02, 3.72614624e+02, 3.69579254e+02, + 3.66549011e+02, 3.63524017e+02, 3.60504456e+02, + 3.57490356e+02, 3.54481903e+02, 3.51479218e+02, + 3.48482361e+02, 3.45491516e+02, 3.42506744e+02, + 3.39528198e+02, 3.36556000e+02, 3.33590240e+02, + 3.30631042e+02, 3.27678528e+02, 3.24732849e+02, + 3.21794067e+02, 3.18862305e+02, 3.15937714e+02, + 3.13020386e+02, 3.10110443e+02, 3.07208008e+02, + 3.04313171e+02, 3.01426056e+02, 2.98546783e+02, + 2.95675476e+02, 2.92812195e+02, 2.89957123e+02, + 2.87110352e+02, 2.84271973e+02, 2.81442108e+02, + 2.78620880e+02, 2.75808380e+02, 2.73004761e+02, + 2.70210083e+02, 2.67424469e+02, 2.64648041e+02, + 2.61880890e+02, 2.59123169e+02, 2.56374939e+02, + 2.53636322e+02, 2.50907440e+02, 2.48188400e+02, + 2.45479294e+02, 2.42780228e+02, 2.40091324e+02, + 2.37412689e+02, 2.34744415e+02, 2.32086609e+02, + 2.29439377e+02, 2.26802826e+02, 2.24177063e+02, + 2.21562195e+02, 2.18958313e+02, 2.16365524e+02, + 2.13783936e+02, 2.11213654e+02, 2.08654755e+02, + 2.06107376e+02, 2.03571594e+02, 2.01047516e+02, + 1.98535233e+02, 1.96034851e+02, 1.93546478e+02, + 1.91070190e+02, 1.88606110e+02, 
1.86154312e+02, + 1.83714920e+02, 1.81288010e+02, 1.78873672e+02, + 1.76472015e+02, 1.74083130e+02, 1.71707123e+02, + 1.69344070e+02, 1.66994064e+02, 1.64657211e+02, + 1.62333603e+02, 1.60023315e+02, 1.57726440e+02, + 1.55443100e+02, 1.53173340e+02, 1.50917297e+02, + 1.48675018e+02, 1.46446609e+02, 1.44232162e+02, + 1.42031754e+02, 1.39845490e+02, 1.37673431e+02, + 1.35515686e+02, 1.33372330e+02, 1.31243439e+02, + 1.29129120e+02, 1.27029427e+02, 1.24944466e+02, + 1.22874313e+02, 1.20819046e+02, 1.18778748e+02, + 1.16753494e+02, 1.14743378e+02, 1.12748466e+02, + 1.10768852e+02, 1.08804596e+02, 1.06855782e+02, + 1.04922493e+02, 1.03004799e+02, 1.01102783e+02, + 9.92165070e+01, 9.73460541e+01, 9.54915009e+01, + 9.36529160e+01, 9.18303757e+01, 9.00239487e+01, + 8.82337036e+01, 8.64597092e+01, 8.47020493e+01, + 8.29607849e+01, 8.12359772e+01, 7.95277100e+01, + 7.78360367e+01, 7.61610336e+01, 7.45027618e+01, + 7.28612823e+01, 7.12366714e+01, 6.96289902e+01, + 6.80382919e+01, 6.64646530e+01, 6.49081192e+01, + 6.33687744e+01, 6.18466606e+01, 6.03418465e+01, + 5.88543854e+01, 5.73843460e+01, 5.59317741e+01, + 5.44967384e+01, 5.30792885e+01, 5.16794815e+01, + 5.02973747e+01, 4.89330215e+01, 4.75864754e+01, + 4.62577858e+01, 4.49470139e+01, 4.36542053e+01, + 4.23794136e+01, 4.11226883e+01, 3.98840752e+01, + 3.86636314e+01, 3.74613953e+01, 3.62774239e+01, + 3.51117554e+01, 3.39644432e+01, 3.28355293e+01, + 3.17250557e+01, 3.06330719e+01, 2.95596161e+01, + 2.85047321e+01, 2.74684620e+01, 2.64508476e+01, + 2.54519272e+01, 2.44717426e+01, 2.35103283e+01, + 2.25677280e+01, 2.16439743e+01, 2.07391052e+01, + 1.98531570e+01, 1.89861641e+01, 1.81381607e+01, + 1.73091812e+01, 1.64992561e+01, 1.57084198e+01, + 1.49367018e+01, 1.41841335e+01, 1.34507446e+01, + 1.27365637e+01, 1.20416193e+01, 1.13659382e+01, + 1.07095480e+01, 1.00724735e+01, 9.45474148e+00, + 8.85637474e+00, 8.27739716e+00, 7.71783257e+00, + 7.17770243e+00, 6.65702772e+00, 6.15582991e+00, + 5.67412758e+00, 5.21194077e+00, 4.76928711e+00, + 4.34618425e+00, 3.94264936e+00, 3.55869770e+00, + 3.19434476e+00, 2.84960485e+00, 2.52449155e+00, + 2.21901774e+00, 1.93319547e+00, 1.66703594e+00, + 1.42054987e+00, 1.19374681e+00, 9.86635804e-01, + 7.99224913e-01, 6.31521702e-01, 4.83532667e-01, + 3.55263680e-01, 2.46719822e-01, 1.57905355e-01, + 8.88238102e-02, 3.94778959e-02, 9.86957178e-03, + 0.00000000e+00, 9.86957271e-03, 3.94778997e-02, + 8.88238102e-02, 1.57905355e-01, 2.46719822e-01, + 3.55263680e-01, 4.83532667e-01, 6.31521702e-01, + 7.99224973e-01, 9.86635804e-01, 1.19374681e+00, + 1.42054987e+00, 1.66703594e+00, 1.93319547e+00, + 2.21901774e+00, 2.52449155e+00, 2.84960485e+00, + 3.19434476e+00, 3.55869770e+00, 3.94264936e+00, + 4.34618425e+00, 4.76928711e+00, 5.21194077e+00, + 5.67412758e+00, 6.15582991e+00, 6.65702772e+00, + 7.17770243e+00, 7.71783257e+00, 8.27739716e+00, + 8.85637474e+00, 9.45474148e+00, 1.00724735e+01, + 1.07095480e+01, 1.13659382e+01, 1.20416193e+01, + 1.27365637e+01, 1.34507446e+01, 1.41841335e+01, + 1.49367018e+01, 1.57084198e+01, 1.64992561e+01, + 1.73091812e+01, 1.81381607e+01, 1.89861641e+01, + 1.98531570e+01, 2.07391052e+01, 2.16439743e+01, + 2.25677280e+01, 2.35103302e+01, 2.44717426e+01, + 2.54519272e+01, 2.64508476e+01, 2.74684620e+01, + 2.85047321e+01, 2.95596161e+01, 3.06330719e+01, + 3.17250576e+01, 3.28355293e+01, 3.39644432e+01, + 3.51117554e+01, 3.62774239e+01, 3.74613953e+01, + 3.86636314e+01, 3.98840752e+01, 4.11226883e+01, + 4.23794136e+01, 4.36542053e+01, 4.49470139e+01, + 4.62577896e+01, 4.75864754e+01, 
4.89330215e+01, + 5.02973747e+01, 5.16794815e+01, 5.30792885e+01, + 5.44967384e+01, 5.59317741e+01, 5.73843460e+01, + 5.88543854e+01, 6.03418465e+01, 6.18466606e+01, + 6.33687744e+01, 6.49081192e+01, 6.64646530e+01, + 6.80382919e+01, 6.96289902e+01, 7.12366714e+01, + 7.28612823e+01, 7.45027618e+01, 7.61610336e+01, + 7.78360367e+01, 7.95277100e+01, 8.12359772e+01, + 8.29607849e+01, 8.47020493e+01, 8.64597092e+01, + 8.82337036e+01, 9.00239487e+01, 9.18303757e+01, + 9.36529160e+01, 9.54915009e+01, 9.73460541e+01, + 9.92165070e+01, 1.01102783e+02, 1.03004799e+02, + 1.04922493e+02, 1.06855782e+02, 1.08804596e+02, + 1.10768852e+02, 1.12748466e+02, 1.14743378e+02, + 1.16753494e+02, 1.18778748e+02, 1.20819046e+02, + 1.22874313e+02, 1.24944466e+02, 1.27029427e+02, + 1.29129120e+02, 1.31243439e+02, 1.33372330e+02, + 1.35515686e+02, 1.37673431e+02, 1.39845490e+02, + 1.42031754e+02, 1.44232162e+02, 1.46446609e+02, + 1.48675018e+02, 1.50917297e+02, 1.53173340e+02, + 1.55443100e+02, 1.57726440e+02, 1.60023315e+02, + 1.62333603e+02, 1.64657211e+02, 1.66994064e+02, + 1.69344070e+02, 1.71707123e+02, 1.74083130e+02, + 1.76472015e+02, 1.78873672e+02, 1.81288010e+02, + 1.83714920e+02, 1.86154312e+02, 1.88606110e+02, + 1.91070190e+02, 1.93546478e+02, 1.96034851e+02, + 1.98535233e+02, 2.01047516e+02, 2.03571594e+02, + 2.06107376e+02, 2.08654755e+02, 2.11213654e+02, + 2.13783936e+02, 2.16365524e+02, 2.18958313e+02, + 2.21562195e+02, 2.24177063e+02, 2.26802826e+02, + 2.29439377e+02, 2.32086609e+02, 2.34744415e+02, + 2.37412689e+02, 2.40091324e+02, 2.42780228e+02, + 2.45479294e+02, 2.48188400e+02, 2.50907440e+02, + 2.53636322e+02, 2.56374939e+02, 2.59123169e+02, + 2.61880890e+02, 2.64648041e+02, 2.67424469e+02, + 2.70210083e+02, 2.73004761e+02, 2.75808380e+02, + 2.78620880e+02, 2.81442108e+02, 2.84271973e+02, + 2.87110352e+02, 2.89957123e+02, 2.92812195e+02, + 2.95675476e+02, 2.98546783e+02, 3.01426056e+02, + 3.04313171e+02, 3.07208008e+02, 3.10110443e+02, + 3.13020386e+02, 3.15937714e+02, 3.18862305e+02, + 3.21794067e+02, 3.24732849e+02, 3.27678528e+02, + 3.30631042e+02, 3.33590240e+02, 3.36556000e+02, + 3.39528198e+02, 3.42506744e+02, 3.45491516e+02, + 3.48482361e+02, 3.51479218e+02, 3.54481903e+02, + 3.57490356e+02, 3.60504456e+02, 3.63524017e+02, + 3.66549011e+02, 3.69579254e+02, 3.72614624e+02, + 3.75655060e+02, 3.78700378e+02, 3.81750488e+02, + 3.84805298e+02, 3.87864624e+02, 3.90928375e+02, + 3.93996460e+02, 3.97068695e+02, 4.00145020e+02, + 4.03225281e+02, 4.06309357e+02, 4.09397125e+02, + 4.12488464e+02, 4.15583282e+02, 4.18681427e+02, + 4.21782776e+02, 4.24887207e+02, 4.27994598e+02, + 4.31104858e+02, 4.34217834e+02, 4.37333374e+02, + 4.40451416e+02, 4.43571808e+02, 4.46694427e+02, + 4.49819153e+02, 4.52945831e+02, 4.56074402e+02, + 4.59204681e+02, 4.62336609e+02, 4.65470001e+02, + 4.68604736e+02, 4.71740723e+02, 4.74877838e+02, + 4.78015930e+02, 4.81154907e+02, 4.84294617e+02, + 4.87434967e+02, 4.90575775e+02, 4.93716980e+02, + 4.96858429e+02, 5.00000000e+02, 5.03141571e+02, + 5.06283020e+02, 5.09424225e+02], dtype=np.float32), +'ASAM.M.SCALAR.UWORD.IDENTICAL': np.array([ 32, 33, 35, 36, 37, 38, 39, 41, 42, 43, 44, + 46, 47, 48, 50, 51, 53, 54, 55, 57, 58, 60, + 61, 63, 64, 66, 68, 69, 71, 72, 74, 76, 77, + 79, 81, 82, 84, 86, 88, 90, 91, 93, 95, 97, + 99, 101, 103, 104, 106, 108, 110, 112, 114, 116, 118, + 120, 122, 124, 127, 129, 131, 133, 135, 137, 139, 142, + 144, 146, 148, 150, 153, 155, 157, 160, 162, 164, 166, + 169, 171, 174, 176, 178, 181, 183, 186, 188, 191, 193, + 196, 198, 201, 203, 206, 
208, 211, 213, 216, 218, 221, + 224, 226, 229, 232, 234, 237, 240, 242, 245, 248, 250, + 253, 256, 259, 261, 264, 267, 270, 273, 275, 278, 281, + 284, 287, 289, 292, 295, 298, 301, 304, 307, 310, 313, + 315, 318, 321, 324, 327, 330, 333, 336, 339, 342, 345, + 348, 351, 354, 357, 360, 363, 366, 369, 372, 375, 378, + 381, 384, 387, 390, 393, 397, 400, 403, 406, 409, 412, + 415, 418, 421, 424, 427, 431, 434, 437, 440, 443, 446, + 449, 452, 456, 459, 462, 465, 468, 471, 474, 478, 481, + 484, 487, 490, 493, 496, 500, 503, 506, 509, 512, 515, + 518, 521, 525, 528, 531, 534, 537, 540, 543, 547, 550, + 553, 556, 559, 562, 565, 568, 572, 575, 578, 581, 584, + 587, 590, 593, 596, 599, 602, 606, 609, 612, 615, 618, + 621, 624, 627, 630, 633, 636, 639, 642, 645, 648, 651, + 654, 657, 660, 663, 666, 669, 672, 675, 678, 681, 684, + 686, 689, 692, 695, 698, 701, 704, 707, 710, 712, 715, + 718, 721, 724, 726, 729, 732, 735, 738, 740, 743, 746, + 749, 751, 754, 757, 759, 762, 765, 767, 770, 773, 775, + 778, 781, 783, 786, 788, 791, 793, 796, 798, 801, 803, + 806, 808, 811, 813, 816, 818, 821, 823, 825, 828, 830, + 833, 835, 837, 839, 842, 844, 846, 849, 851, 853, 855, + 857, 860, 862, 864, 866, 868, 870, 872, 875, 877, 879, + 881, 883, 885, 887, 889, 891, 893, 895, 896, 898, 900, + 902, 904, 906, 908, 909, 911, 913, 915, 917, 918, 920, + 922, 923, 925, 927, 928, 930, 931, 933, 935, 936, 938, + 939, 941, 942, 944, 945, 946, 948, 949, 951, 952, 953, + 955, 956, 957, 958, 960, 961, 962, 963, 964, 966, 967, + 968, 969, 970, 971, 972, 973, 974, 975, 976, 977, 978, + 979, 980, 981, 981, 982, 983, 984, 985, 985, 986, 987, + 987, 988, 989, 989, 990, 991, 991, 992, 992, 993, 993, + 994, 994, 995, 995, 996, 996, 996, 997, 997, 997, 998, + 998, 998, 998, 999, 999, 999, 999, 999, 999, 999, 999, + 999, 999, 1000, 999, 999, 999, 999, 999, 999, 999, 999, + 999, 999, 998, 998, 998, 998, 997, 997, 997, 996, 996, + 996, 995, 995, 994, 994, 993, 993, 992, 992, 991, 991, + 990, 989, 989, 988, 987, 987, 986, 985, 985, 984, 983, + 982, 981, 981, 980, 979, 978, 977, 976, 975, 974, 973, + 972, 971, 970, 969, 968, 967, 966, 964, 963, 962, 961, + 960, 958, 957, 956, 955, 953, 952, 951, 949, 948, 946, + 945, 944, 942, 941, 939, 938, 936, 935, 933, 931, 930, + 928, 927, 925, 923, 922, 920, 918, 917, 915, 913, 911, + 909, 908, 906, 904, 902, 900, 898, 896, 895, 893, 891, + 889, 887, 885, 883, 881, 879, 877, 875, 872, 870, 868, + 866, 864, 862, 860, 857, 855, 853, 851, 849, 846, 844, + 842, 839, 837, 835, 833, 830, 828, 825, 823, 821, 818, + 816, 813, 811, 808, 806, 803, 801, 798, 796, 793, 791, + 788, 786, 783, 781, 778, 775, 773, 770, 767, 765, 762, + 759, 757, 754, 751, 749, 746, 743, 740, 738, 735, 732, + 729, 726, 724, 721, 718, 715, 712, 710, 707, 704, 701, + 698, 695, 692, 689, 686, 684, 681, 678, 675, 672, 669, + 666, 663, 660, 657, 654, 651, 648, 645, 642, 639, 636, + 633, 630, 627, 624, 621, 618, 615, 612, 609, 606, 602, + 599, 596, 593, 590, 587, 584, 581, 578, 575, 572, 568, + 565, 562, 559, 556, 553, 550, 547, 543, 540, 537, 534, + 531, 528, 525, 521, 518, 515, 512, 509, 506, 503, 499, + 496, 493, 490, 487, 484, 481, 478, 474, 471, 468, 465, + 462, 459, 456, 452, 449, 446, 443, 440, 437, 434, 431, + 427, 424, 421, 418, 415, 412, 409, 406, 403, 400, 397, + 393, 390, 387, 384, 381, 378, 375, 372, 369, 366, 363, + 360, 357, 354, 351, 348, 345, 342, 339, 336, 333, 330, + 327, 324, 321, 318, 315, 313, 310, 307, 304, 301, 298, + 295, 292, 289, 287, 284, 281, 278, 275, 273, 270, 267, + 264, 261, 259, 256, 253, 250, 248, 245, 242, 
240, 237, + 234, 232, 229, 226, 224, 221, 218, 216, 213, 211, 208, + 206, 203, 201, 198, 196, 193, 191, 188, 186, 183, 181, + 178, 176, 174, 171, 169, 166, 164, 162, 160, 157, 155, + 153, 150, 148, 146, 144, 142, 139, 137, 135, 133, 131, + 129, 127, 124, 122, 120, 118, 116, 114, 112, 110, 108, + 106, 104, 103, 101, 99, 97, 95, 93, 91, 90, 88, + 86, 84, 82, 81, 79, 77, 76, 74, 72, 71, 69, + 68, 66, 64, 63, 61, 60, 58, 57, 55, 54, 53, + 51, 50, 48, 47, 46, 44, 43, 42, 41, 39, 38, + 37, 36, 35, 33, 32, 31, 30, 29, 28, 27, 26, + 25, 24, 23, 22, 21], dtype=np.uint16), +'ASAM.M.SCALAR.UBYTE.TAB_VERB_NO_DEFAULT_VALUE': np.array([ 17, 16, 15, 14, 14, 13, 12, 12, 11, 10, 10, 9, 8, + 8, 7, 7, 6, 6, 5, 5, 4, 4, 3, 3, 3, 2, + 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, + 5, 5, 6, 6, 7, 7, 8, 8, 9, 10, 10, 11, 12, + 12, 13, 14, 14, 15, 16, 17, 18, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 35, 36, + 37, 38, 39, 41, 42, 43, 44, 46, 47, 48, 50, 51, 53, + 54, 55, 57, 58, 60, 61, 63, 64, 66, 68, 69, 71, 72, + 74, 76, 77, 79, 81, 82, 84, 86, 88, 90, 91, 93, 95, + 97, 99, 101, 103, 104, 106, 108, 110, 112, 114, 116, 118, 120, + 122, 124, 127, 129, 131, 133, 135, 137, 139, 142, 144, 146, 148, + 150, 153, 155, 157, 160, 162, 164, 166, 169, 171, 174, 176, 178, + 181, 183, 186, 188, 191, 193, 196, 198, 201, 203, 206, 208, 211, + 213, 216, 218, 221, 224, 226, 229, 232, 234, 237, 240, 242, 245, + 248, 250, 253, 0, 3, 5, 8, 11, 14, 17, 19, 22, 25, + 28, 31, 33, 36, 39, 42, 45, 48, 51, 54, 57, 59, 62, + 65, 68, 71, 74, 77, 80, 83, 86, 89, 92, 95, 98, 101, + 104, 107, 110, 113, 116, 119, 122, 125, 128, 131, 134, 137, 141, + 144, 147, 150, 153, 156, 159, 162, 165, 168, 171, 175, 178, 181, + 184, 187, 190, 193, 196, 200, 203, 206, 209, 212, 215, 218, 222, + 225, 228, 231, 234, 237, 240, 244, 247, 250, 253, 0, 3, 6, + 9, 13, 16, 19, 22, 25, 28, 31, 35, 38, 41, 44, 47, + 50, 53, 56, 60, 63, 66, 69, 72, 75, 78, 81, 84, 87, + 90, 94, 97, 100, 103, 106, 109, 112, 115, 118, 121, 124, 127, + 130, 133, 136, 139, 142, 145, 148, 151, 154, 157, 160, 163, 166, + 169, 172, 174, 177, 180, 183, 186, 189, 192, 195, 198, 200, 203, + 206, 209, 212, 214, 217, 220, 223, 226, 228, 231, 234, 237, 239, + 242, 245, 247, 250, 253, 255, 2, 5, 7, 10, 13, 15, 18, + 20, 23, 25, 28, 30, 33, 35, 38, 40, 43, 45, 48, 50, + 53, 55, 57, 60, 62, 65, 67, 69, 71, 74, 76, 78, 81, + 83, 85, 87, 89, 92, 94, 96, 98, 100, 102, 104, 107, 109, + 111, 113, 115, 117, 119, 121, 123, 125, 127, 128, 130, 132, 134, + 136, 138, 140, 141, 143, 145, 147, 149, 150, 152, 154, 155, 157, + 159, 160, 162, 163, 165, 167, 168, 170, 171, 173, 174, 176, 177, + 178, 180, 181, 183, 184, 185, 187, 188, 189, 190, 192, 193, 194, + 195, 196, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 213, 214, 215, 216, 217, 217, 218, 219, + 219, 220, 221, 221, 222, 223, 223, 224, 224, 225, 225, 226, 226, + 227, 227, 228, 228, 228, 229, 229, 229, 230, 230, 230, 230, 231, + 231, 231, 231, 231, 231, 231, 231, 231, 231, 232, 231, 231, 231, + 231, 231, 231, 231, 231, 231, 231, 230, 230, 230, 230, 229, 229, + 229, 228, 228, 228, 227, 227, 226, 226, 225, 225, 224, 224, 223, + 223, 222, 221, 221, 220, 219, 219, 218, 217, 217, 216, 215, 214, + 213, 213, 212, 211, 210, 209, 208, 207, 206, 205, 204, 203, 202, + 201, 200, 199, 198, 196, 195, 194, 193, 192, 190, 189, 188, 187, + 185, 184, 183, 181, 180, 178, 177, 176, 174, 173, 171, 170, 168, + 167, 165, 163, 162, 160, 159, 157, 
155, 154, 152, 150, 149, 147, + 145, 143, 141, 140, 138, 136, 134, 132, 130, 128, 127, 125, 123, + 121, 119, 117, 115, 113, 111, 109, 107, 104, 102, 100, 98, 96, + 94, 92, 89, 87, 85, 83, 81, 78, 76, 74, 71, 69, 67, + 65, 62, 60, 57, 55, 53, 50, 48, 45, 43, 40, 38, 35, + 33, 30, 28, 25, 23, 20, 18, 15, 13, 10, 7, 5, 2, + 255, 253, 250, 247, 245, 242, 239, 237, 234, 231, 228, 226, 223, + 220, 217, 214, 212, 209, 206, 203, 200, 198, 195, 192, 189, 186, + 183, 180, 177, 174, 172, 169, 166, 163, 160, 157, 154, 151, 148, + 145, 142, 139, 136, 133, 130, 127, 124, 121, 118, 115, 112, 109, + 106, 103, 100, 97, 94, 90, 87, 84, 81, 78, 75, 72, 69, + 66, 63, 60, 56, 53, 50, 47, 44, 41, 38, 35, 31, 28, + 25, 22, 19, 16, 13, 9, 6, 3, 0, 253, 250, 247, 243, + 240, 237, 234, 231, 228, 225, 222, 218, 215, 212, 209, 206, 203, + 200, 196, 193, 190, 187, 184, 181, 178, 175, 171, 168, 165, 162, + 159, 156, 153, 150, 147, 144, 141, 137, 134, 131, 128, 125, 122, + 119, 116, 113, 110, 107, 104, 101, 98, 95, 92, 89, 86, 83, + 80, 77, 74, 71, 68, 65, 62, 59, 57, 54, 51, 48, 45, + 42, 39, 36, 33, 31, 28, 25, 22, 19, 17, 14, 11, 8, + 5, 3, 0, 253, 250, 248, 245, 242, 240, 237, 234, 232, 229, + 226, 224, 221, 218, 216, 213, 211, 208, 206, 203, 201, 198], dtype=np.uint8), +'ASAM.M.SCALAR.UBYTE.TAB_NOINTP_NO_DEFAULT_VALUE': np.array([ 105. , 102. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 109. , 106. , 103. , + 100. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 110.33333588, 108. , 105. , + 103. , 100. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. 
, + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 110.66666412, 110.66666412, + 110.33333588, 110. , 110. , 109. , + 108. , 108. , 107. , 107. , + 106. , 106. , 105. , 105. , + 104. , 104. , 103. , 103. , + 103. , 102. , 102. , 102. , + 101. , 101. , 101. , 101. , + 100. , 100. , 100. , 100. , + 100. , 100. , 100. , 100. , + 100. , 100. , 100. , 100. , + 100. , 100. , 100. , 100. , + 100. , 100. , 100. , 100. , + 100. , 101. , 101. , 101. , + 101. , 102. , 102. , 102. , + 103. , 103. , 103. , 104. , + 104. , 105. , 105. , 106. , + 106. , 107. , 107. , 108. , + 108. , 109. , 110. , 110. , + 110.33333588, 110.66666412, 110.66666412, 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 100. , + 103. , 105. , 108. , 110.33333588, + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 100. , 103. , 106. , 109. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 102. , + 105. , 107. , 110. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. 
, 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. , + 111. , 111. , 111. , 111. ], dtype=np.float32), +'ASAM.M.SCALAR.UBYTE.TAB_VERB_DEFAULT_VALUE': np.array([218, 215, 212, 209, 206, 203, 200, 196, 193, 190, 187, 184, 181, + 178, 175, 171, 168, 165, 162, 159, 156, 153, 150, 147, 144, 141, + 137, 134, 131, 128, 125, 122, 119, 116, 113, 110, 107, 104, 101, + 98, 95, 92, 89, 86, 83, 80, 77, 74, 71, 68, 65, 62, + 59, 57, 54, 51, 48, 45, 42, 39, 36, 33, 31, 28, 25, + 22, 19, 17, 14, 11, 8, 5, 3, 0, 253, 250, 248, 245, + 242, 240, 237, 234, 232, 229, 226, 224, 221, 218, 216, 213, 211, + 208, 206, 203, 201, 198, 196, 193, 191, 188, 186, 183, 181, 178, + 176, 174, 171, 169, 166, 164, 162, 160, 157, 155, 153, 150, 148, + 146, 144, 142, 139, 137, 135, 133, 131, 129, 127, 124, 122, 120, + 118, 116, 114, 112, 110, 108, 106, 104, 103, 101, 99, 97, 95, + 93, 91, 90, 88, 86, 84, 82, 81, 79, 77, 76, 74, 72, + 71, 69, 68, 66, 64, 63, 61, 60, 58, 57, 55, 54, 53, + 51, 50, 48, 47, 46, 44, 43, 42, 41, 39, 38, 37, 36, + 35, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, + 21, 20, 19, 18, 18, 17, 16, 15, 14, 14, 13, 12, 12, + 11, 10, 10, 9, 8, 8, 7, 7, 6, 6, 5, 5, 4, + 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, + 3, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 17, 18, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 35, 36, 37, 38, 39, 41, 42, 43, 44, 46, + 47, 48, 50, 51, 53, 54, 55, 57, 58, 60, 61, 63, 64, + 66, 68, 69, 71, 72, 74, 76, 77, 79, 81, 82, 84, 86, + 88, 90, 91, 93, 95, 97, 99, 101, 103, 104, 106, 108, 110, + 112, 114, 116, 118, 120, 122, 124, 127, 129, 131, 133, 135, 137, + 139, 142, 144, 146, 148, 150, 153, 155, 157, 160, 162, 164, 166, + 169, 171, 174, 176, 178, 181, 183, 186, 188, 191, 193, 196, 198, + 201, 203, 206, 208, 211, 213, 216, 218, 221, 224, 226, 229, 232, + 234, 237, 240, 242, 245, 248, 250, 253, 0, 3, 5, 8, 11, + 14, 17, 19, 22, 25, 28, 31, 33, 36, 39, 42, 45, 48, + 51, 54, 57, 59, 62, 65, 68, 71, 74, 77, 80, 83, 86, + 89, 92, 95, 98, 101, 104, 107, 110, 113, 116, 119, 122, 125, + 128, 131, 134, 137, 141, 144, 147, 150, 153, 156, 159, 162, 165, + 168, 171, 175, 178, 181, 184, 187, 190, 193, 196, 200, 203, 206, + 209, 212, 215, 218, 222, 225, 228, 231, 234, 237, 240, 244, 247, + 250, 253, 0, 3, 6, 9, 13, 16, 19, 22, 25, 28, 31, + 35, 38, 41, 44, 47, 50, 53, 56, 60, 63, 66, 69, 72, + 75, 78, 81, 84, 87, 90, 94, 97, 100, 103, 106, 109, 112, + 115, 118, 121, 124, 127, 130, 133, 136, 139, 142, 145, 148, 151, + 154, 157, 160, 163, 166, 169, 172, 174, 177, 
180, 183, 186, 189, + 192, 195, 198, 200, 203, 206, 209, 212, 214, 217, 220, 223, 226, + 228, 231, 234, 237, 239, 242, 245, 247, 250, 253, 255, 2, 5, + 7, 10, 13, 15, 18, 20, 23, 25, 28, 30, 33, 35, 38, + 40, 43, 45, 48, 50, 53, 55, 57, 60, 62, 65, 67, 69, + 71, 74, 76, 78, 81, 83, 85, 87, 89, 92, 94, 96, 98, + 100, 102, 104, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, + 127, 128, 130, 132, 134, 136, 138, 140, 141, 143, 145, 147, 149, + 150, 152, 154, 155, 157, 159, 160, 162, 163, 165, 167, 168, 170, + 171, 173, 174, 176, 177, 178, 180, 181, 183, 184, 185, 187, 188, + 189, 190, 192, 193, 194, 195, 196, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 213, 214, 215, + 216, 217, 217, 218, 219, 219, 220, 221, 221, 222, 223, 223, 224, + 224, 225, 225, 226, 226, 227, 227, 228, 228, 228, 229, 229, 229, + 230, 230, 230, 230, 231, 231, 231, 231, 231, 231, 231, 231, 231, + 231, 232, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 230, + 230, 230, 230, 229, 229, 229, 228, 228, 228, 227, 227, 226, 226, + 225, 225, 224, 224, 223, 223, 222, 221, 221, 220, 219, 219, 218, + 217, 217, 216, 215, 214, 213, 213, 212, 211, 210, 209, 208, 207, + 206, 205, 204, 203, 202, 201, 200, 199, 198, 196, 195, 194, 193, + 192, 190, 189, 188, 187, 185, 184, 183, 181, 180, 178, 177, 176, + 174, 173, 171, 170, 168, 167, 165, 163, 162, 160, 159, 157, 155, + 154, 152, 150, 149, 147, 145, 143, 141, 140, 138, 136, 134, 132, + 130, 128, 127, 125, 123, 121, 119, 117, 115, 113, 111, 109, 107, + 104, 102, 100, 98, 96, 94, 92, 89, 87, 85, 83, 81, 78, + 76, 74, 71, 69, 67, 65, 62, 60, 57, 55, 53, 50, 48, + 45, 43, 40, 38, 35, 33, 30, 28, 25, 23, 20, 18], dtype=np.uint8), +'ASAM.M.SCALAR.ULONG.IDENTICAL': np.array([ 186, 183, 181, 178, 176, 174, 171, 169, 166, 164, 162, + 160, 157, 155, 153, 150, 148, 146, 144, 142, 139, 137, + 135, 133, 131, 129, 127, 124, 122, 120, 118, 116, 114, + 112, 110, 108, 106, 104, 103, 101, 99, 97, 95, 93, + 91, 90, 88, 86, 84, 82, 81, 79, 77, 76, 74, + 72, 71, 69, 68, 66, 64, 63, 61, 60, 58, 57, + 55, 54, 53, 51, 50, 48, 47, 46, 44, 43, 42, + 41, 39, 38, 37, 36, 35, 33, 32, 31, 30, 29, + 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, + 18, 17, 16, 15, 14, 14, 13, 12, 12, 11, 10, + 10, 9, 8, 8, 7, 7, 6, 6, 5, 5, 4, + 4, 3, 3, 3, 2, 2, 2, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, + 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, + 5, 5, 6, 6, 7, 7, 8, 8, 9, 10, 10, + 11, 12, 12, 13, 14, 14, 15, 16, 17, 18, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 35, 36, 37, 38, 39, 41, 42, + 43, 44, 46, 47, 48, 50, 51, 53, 54, 55, 57, + 58, 60, 61, 63, 64, 66, 68, 69, 71, 72, 74, + 76, 77, 79, 81, 82, 84, 86, 88, 90, 91, 93, + 95, 97, 99, 101, 103, 104, 106, 108, 110, 112, 114, + 116, 118, 120, 122, 124, 127, 129, 131, 133, 135, 137, + 139, 142, 144, 146, 148, 150, 153, 155, 157, 160, 162, + 164, 166, 169, 171, 174, 176, 178, 181, 183, 186, 188, + 191, 193, 196, 198, 201, 203, 206, 208, 211, 213, 216, + 218, 221, 224, 226, 229, 232, 234, 237, 240, 242, 245, + 248, 250, 253, 256, 259, 261, 264, 267, 270, 273, 275, + 278, 281, 284, 287, 289, 292, 295, 298, 301, 304, 307, + 310, 313, 315, 318, 321, 324, 327, 330, 333, 336, 339, + 342, 345, 348, 351, 354, 357, 360, 363, 366, 369, 372, + 375, 378, 381, 384, 387, 390, 393, 397, 400, 403, 406, + 409, 412, 415, 418, 421, 424, 427, 431, 434, 437, 440, + 443, 446, 449, 452, 456, 459, 462, 465, 468, 471, 474, + 478, 481, 484, 487, 490, 493, 496, 500, 503, 506, 509, + 512, 515, 518, 521, 525, 528, 
531, 534, 537, 540, 543, + 547, 550, 553, 556, 559, 562, 565, 568, 572, 575, 578, + 581, 584, 587, 590, 593, 596, 599, 602, 606, 609, 612, + 615, 618, 621, 624, 627, 630, 633, 636, 639, 642, 645, + 648, 651, 654, 657, 660, 663, 666, 669, 672, 675, 678, + 681, 684, 686, 689, 692, 695, 698, 701, 704, 707, 710, + 712, 715, 718, 721, 724, 726, 729, 732, 735, 738, 740, + 743, 746, 749, 751, 754, 757, 759, 762, 765, 767, 770, + 773, 775, 778, 781, 783, 786, 788, 791, 793, 796, 798, + 801, 803, 806, 808, 811, 813, 816, 818, 821, 823, 825, + 828, 830, 833, 835, 837, 839, 842, 844, 846, 849, 851, + 853, 855, 857, 860, 862, 864, 866, 868, 870, 872, 875, + 877, 879, 881, 883, 885, 887, 889, 891, 893, 895, 896, + 898, 900, 902, 904, 906, 908, 909, 911, 913, 915, 917, + 918, 920, 922, 923, 925, 927, 928, 930, 931, 933, 935, + 936, 938, 939, 941, 942, 944, 945, 946, 948, 949, 951, + 952, 953, 955, 956, 957, 958, 960, 961, 962, 963, 964, + 966, 967, 968, 969, 970, 971, 972, 973, 974, 975, 976, + 977, 978, 979, 980, 981, 981, 982, 983, 984, 985, 985, + 986, 987, 987, 988, 989, 989, 990, 991, 991, 992, 992, + 993, 993, 994, 994, 995, 995, 996, 996, 996, 997, 997, + 997, 998, 998, 998, 998, 999, 999, 999, 999, 999, 999, + 999, 999, 999, 999, 1000, 999, 999, 999, 999, 999, 999, + 999, 999, 999, 999, 998, 998, 998, 998, 997, 997, 997, + 996, 996, 996, 995, 995, 994, 994, 993, 993, 992, 992, + 991, 991, 990, 989, 989, 988, 987, 987, 986, 985, 985, + 984, 983, 982, 981, 981, 980, 979, 978, 977, 976, 975, + 974, 973, 972, 971, 970, 969, 968, 967, 966, 964, 963, + 962, 961, 960, 958, 957, 956, 955, 953, 952, 951, 949, + 948, 946, 945, 944, 942, 941, 939, 938, 936, 935, 933, + 931, 930, 928, 927, 925, 923, 922, 920, 918, 917, 915, + 913, 911, 909, 908, 906, 904, 902, 900, 898, 896, 895, + 893, 891, 889, 887, 885, 883, 881, 879, 877, 875, 872, + 870, 868, 866, 864, 862, 860, 857, 855, 853, 851, 849, + 846, 844, 842, 839, 837, 835, 833, 830, 828, 825, 823, + 821, 818, 816, 813, 811, 808, 806, 803, 801, 798, 796, + 793, 791, 788, 786, 783, 781, 778, 775, 773, 770, 767, + 765, 762, 759, 757, 754, 751, 749, 746, 743, 740, 738, + 735, 732, 729, 726, 724, 721, 718, 715, 712, 710, 707, + 704, 701, 698, 695, 692, 689, 686, 684, 681, 678, 675, + 672, 669, 666, 663, 660, 657, 654, 651, 648, 645, 642, + 639, 636, 633, 630, 627, 624, 621, 618, 615, 612, 609, + 606, 602, 599, 596, 593, 590, 587, 584, 581, 578, 575, + 572, 568, 565, 562, 559, 556, 553, 550, 547, 543, 540, + 537, 534, 531, 528, 525, 521, 518, 515, 512, 509, 506, + 503, 499, 496, 493, 490], dtype=np.uint32), +'ASAM.M.SCALAR.UBYTE.VTAB_RANGE_NO_DEFAULT_VALUE': np.array([199, 198, 196, 195, 194, 193, 192, 190, 189, 188, 187, 185, 184, + 183, 181, 180, 178, 177, 176, 174, 173, 171, 170, 168, 167, 165, + 163, 162, 160, 159, 157, 155, 154, 152, 150, 149, 147, 145, 143, + 141, 140, 138, 136, 134, 132, 130, 128, 127, 125, 123, 121, 119, + 117, 115, 113, 111, 109, 107, 104, 102, 100, 98, 96, 94, 92, + 89, 87, 85, 83, 81, 78, 76, 74, 71, 69, 67, 65, 62, + 60, 57, 55, 53, 50, 48, 45, 43, 40, 38, 35, 33, 30, + 28, 25, 23, 20, 18, 15, 13, 10, 7, 5, 2, 255, 253, + 250, 247, 245, 242, 239, 237, 234, 231, 228, 226, 223, 220, 217, + 214, 212, 209, 206, 203, 200, 198, 195, 192, 189, 186, 183, 180, + 177, 174, 172, 169, 166, 163, 160, 157, 154, 151, 148, 145, 142, + 139, 136, 133, 130, 127, 124, 121, 118, 115, 112, 109, 106, 103, + 100, 97, 94, 90, 87, 84, 81, 78, 75, 72, 69, 66, 63, + 60, 56, 53, 50, 47, 44, 41, 38, 35, 31, 28, 25, 22, + 19, 16, 13, 9, 6, 3, 0, 253, 250, 247, 243, 
240, 237, + 234, 231, 228, 225, 222, 218, 215, 212, 209, 206, 203, 200, 196, + 193, 190, 187, 184, 181, 178, 175, 171, 168, 165, 162, 159, 156, + 153, 150, 147, 144, 141, 137, 134, 131, 128, 125, 122, 119, 116, + 113, 110, 107, 104, 101, 98, 95, 92, 89, 86, 83, 80, 77, + 74, 71, 68, 65, 62, 59, 57, 54, 51, 48, 45, 42, 39, + 36, 33, 31, 28, 25, 22, 19, 17, 14, 11, 8, 5, 3, + 0, 253, 250, 248, 245, 242, 240, 237, 234, 232, 229, 226, 224, + 221, 218, 216, 213, 211, 208, 206, 203, 201, 198, 196, 193, 191, + 188, 186, 183, 181, 178, 176, 174, 171, 169, 166, 164, 162, 160, + 157, 155, 153, 150, 148, 146, 144, 142, 139, 137, 135, 133, 131, + 129, 127, 124, 122, 120, 118, 116, 114, 112, 110, 108, 106, 104, + 103, 101, 99, 97, 95, 93, 91, 90, 88, 86, 84, 82, 81, + 79, 77, 76, 74, 72, 71, 69, 68, 66, 64, 63, 61, 60, + 58, 57, 55, 54, 53, 51, 50, 48, 47, 46, 44, 43, 42, + 41, 39, 38, 37, 36, 35, 33, 32, 31, 30, 29, 28, 27, + 26, 25, 24, 23, 22, 21, 20, 19, 18, 18, 17, 16, 15, + 14, 14, 13, 12, 12, 11, 10, 10, 9, 8, 8, 7, 7, + 6, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2, 2, 1, + 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, + 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, + 6, 7, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, + 14, 15, 16, 17, 18, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 35, 36, 37, 38, 39, + 41, 42, 43, 44, 46, 47, 48, 50, 51, 53, 54, 55, 57, + 58, 60, 61, 63, 64, 66, 68, 69, 71, 72, 74, 76, 77, + 79, 81, 82, 84, 86, 88, 90, 91, 93, 95, 97, 99, 101, + 103, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 127, + 129, 131, 133, 135, 137, 139, 142, 144, 146, 148, 150, 153, 155, + 157, 160, 162, 164, 166, 169, 171, 174, 176, 178, 181, 183, 186, + 188, 191, 193, 196, 198, 201, 203, 206, 208, 211, 213, 216, 218, + 221, 224, 226, 229, 232, 234, 237, 240, 242, 245, 248, 250, 253, + 0, 3, 5, 8, 11, 14, 17, 19, 22, 25, 28, 31, 33, + 36, 39, 42, 45, 48, 51, 54, 57, 59, 62, 65, 68, 71, + 74, 77, 80, 83, 86, 89, 92, 95, 98, 101, 104, 107, 110, + 113, 116, 119, 122, 125, 128, 131, 134, 137, 141, 144, 147, 150, + 153, 156, 159, 162, 165, 168, 171, 175, 178, 181, 184, 187, 190, + 193, 196, 200, 203, 206, 209, 212, 215, 218, 222, 225, 228, 231, + 234, 237, 240, 244, 247, 250, 253, 0, 3, 6, 9, 13, 16, + 19, 22, 25, 28, 31, 35, 38, 41, 44, 47, 50, 53, 56, + 60, 63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 94, 97, + 100, 103, 106, 109, 112, 115, 118, 121, 124, 127, 130, 133, 136, + 139, 142, 145, 148, 151, 154, 157, 160, 163, 166, 169, 172, 174, + 177, 180, 183, 186, 189, 192, 195, 198, 200, 203, 206, 209, 212, + 214, 217, 220, 223, 226, 228, 231, 234, 237, 239, 242, 245, 247, + 250, 253, 255, 2, 5, 7, 10, 13, 15, 18, 20, 23, 25, + 28, 30, 33, 35, 38, 40, 43, 45, 48, 50, 53, 55, 57, + 60, 62, 65, 67, 69, 71, 74, 76, 78, 81, 83, 85, 87, + 89, 92, 94, 96, 98, 100, 102, 104, 107, 109, 111, 113, 115, + 117, 119, 121, 123, 125, 127, 128, 130, 132, 134, 136, 138, 140, + 141, 143, 145, 147, 149, 150, 152, 154, 155, 157, 159, 160, 162, + 163, 165, 167, 168, 170, 171, 173, 174, 176, 177, 178, 180, 181, + 183, 184, 185, 187, 188, 189, 190, 192, 193, 194, 195, 196, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210], dtype=np.uint8), +'ASAM.M.SCALAR.FLOAT64.IDENTICAL': np.array([ 1.73091812e+01, 1.64992561e+01, 1.57084198e+01, + 1.49367018e+01, 1.41841335e+01, 1.34507446e+01, + 1.27365637e+01, 1.20416193e+01, 1.13659382e+01, + 1.07095480e+01, 1.00724735e+01, 9.45474148e+00, + 8.85637474e+00, 8.27739716e+00, 7.71783257e+00, + 7.17770243e+00, 6.65702772e+00, 
6.15582991e+00, + 5.67412758e+00, 5.21194077e+00, 4.76928711e+00, + 4.34618425e+00, 3.94264936e+00, 3.55869770e+00, + 3.19434476e+00, 2.84960485e+00, 2.52449155e+00, + 2.21901774e+00, 1.93319547e+00, 1.66703594e+00, + 1.42054987e+00, 1.19374681e+00, 9.86635804e-01, + 7.99224973e-01, 6.31521702e-01, 4.83532667e-01, + 3.55263680e-01, 2.46719822e-01, 1.57905355e-01, + 8.88238102e-02, 3.94778997e-02, 9.86957178e-03, + 0.00000000e+00, 9.86957178e-03, 3.94778959e-02, + 8.88238102e-02, 1.57905355e-01, 2.46719822e-01, + 3.55263680e-01, 4.83532667e-01, 6.31521702e-01, + 7.99224973e-01, 9.86635804e-01, 1.19374681e+00, + 1.42054987e+00, 1.66703594e+00, 1.93319547e+00, + 2.21901774e+00, 2.52449155e+00, 2.84960485e+00, + 3.19434476e+00, 3.55869770e+00, 3.94264936e+00, + 4.34618425e+00, 4.76928711e+00, 5.21194077e+00, + 5.67412758e+00, 6.15582991e+00, 6.65702772e+00, + 7.17770243e+00, 7.71783257e+00, 8.27739716e+00, + 8.85637474e+00, 9.45474148e+00, 1.00724735e+01, + 1.07095480e+01, 1.13659382e+01, 1.20416193e+01, + 1.27365637e+01, 1.34507446e+01, 1.41841335e+01, + 1.49367018e+01, 1.57084198e+01, 1.64992561e+01, + 1.73091812e+01, 1.81381607e+01, 1.89861641e+01, + 1.98531570e+01, 2.07391052e+01, 2.16439743e+01, + 2.25677280e+01, 2.35103283e+01, 2.44717426e+01, + 2.54519272e+01, 2.64508476e+01, 2.74684620e+01, + 2.85047321e+01, 2.95596161e+01, 3.06330719e+01, + 3.17250557e+01, 3.28355293e+01, 3.39644432e+01, + 3.51117554e+01, 3.62774239e+01, 3.74613953e+01, + 3.86636314e+01, 3.98840752e+01, 4.11226883e+01, + 4.23794136e+01, 4.36542053e+01, 4.49470139e+01, + 4.62577896e+01, 4.75864754e+01, 4.89330215e+01, + 5.02973747e+01, 5.16794815e+01, 5.30792885e+01, + 5.44967384e+01, 5.59317741e+01, 5.73843460e+01, + 5.88543854e+01, 6.03418465e+01, 6.18466606e+01, + 6.33687744e+01, 6.49081192e+01, 6.64646530e+01, + 6.80382919e+01, 6.96289902e+01, 7.12366714e+01, + 7.28612823e+01, 7.45027618e+01, 7.61610336e+01, + 7.78360367e+01, 7.95277100e+01, 8.12359772e+01, + 8.29607849e+01, 8.47020493e+01, 8.64597092e+01, + 8.82337036e+01, 9.00239487e+01, 9.18303757e+01, + 9.36529160e+01, 9.54915009e+01, 9.73460541e+01, + 9.92165070e+01, 1.01102783e+02, 1.03004799e+02, + 1.04922493e+02, 1.06855782e+02, 1.08804596e+02, + 1.10768852e+02, 1.12748466e+02, 1.14743378e+02, + 1.16753494e+02, 1.18778748e+02, 1.20819046e+02, + 1.22874313e+02, 1.24944466e+02, 1.27029427e+02, + 1.29129120e+02, 1.31243439e+02, 1.33372330e+02, + 1.35515686e+02, 1.37673431e+02, 1.39845490e+02, + 1.42031754e+02, 1.44232162e+02, 1.46446609e+02, + 1.48675018e+02, 1.50917297e+02, 1.53173340e+02, + 1.55443100e+02, 1.57726440e+02, 1.60023315e+02, + 1.62333603e+02, 1.64657211e+02, 1.66994064e+02, + 1.69344070e+02, 1.71707123e+02, 1.74083130e+02, + 1.76472015e+02, 1.78873672e+02, 1.81288010e+02, + 1.83714920e+02, 1.86154312e+02, 1.88606110e+02, + 1.91070190e+02, 1.93546478e+02, 1.96034851e+02, + 1.98535233e+02, 2.01047516e+02, 2.03571594e+02, + 2.06107376e+02, 2.08654755e+02, 2.11213654e+02, + 2.13783936e+02, 2.16365524e+02, 2.18958313e+02, + 2.21562195e+02, 2.24177063e+02, 2.26802826e+02, + 2.29439377e+02, 2.32086609e+02, 2.34744415e+02, + 2.37412689e+02, 2.40091324e+02, 2.42780228e+02, + 2.45479294e+02, 2.48188400e+02, 2.50907440e+02, + 2.53636322e+02, 2.56374939e+02, 2.59123169e+02, + 2.61880890e+02, 2.64648041e+02, 2.67424469e+02, + 2.70210083e+02, 2.73004761e+02, 2.75808380e+02, + 2.78620880e+02, 2.81442108e+02, 2.84271973e+02, + 2.87110352e+02, 2.89957123e+02, 2.92812195e+02, + 2.95675476e+02, 2.98546783e+02, 3.01426056e+02, + 3.04313171e+02, 3.07208008e+02, 
3.10110443e+02, + 3.13020386e+02, 3.15937714e+02, 3.18862305e+02, + 3.21794067e+02, 3.24732849e+02, 3.27678528e+02, + 3.30631042e+02, 3.33590240e+02, 3.36556000e+02, + 3.39528198e+02, 3.42506744e+02, 3.45491516e+02, + 3.48482361e+02, 3.51479218e+02, 3.54481903e+02, + 3.57490356e+02, 3.60504456e+02, 3.63524017e+02, + 3.66549011e+02, 3.69579254e+02, 3.72614624e+02, + 3.75655060e+02, 3.78700378e+02, 3.81750488e+02, + 3.84805298e+02, 3.87864624e+02, 3.90928375e+02, + 3.93996460e+02, 3.97068695e+02, 4.00145020e+02, + 4.03225281e+02, 4.06309357e+02, 4.09397125e+02, + 4.12488464e+02, 4.15583282e+02, 4.18681427e+02, + 4.21782776e+02, 4.24887207e+02, 4.27994598e+02, + 4.31104858e+02, 4.34217834e+02, 4.37333374e+02, + 4.40451416e+02, 4.43571808e+02, 4.46694427e+02, + 4.49819153e+02, 4.52945831e+02, 4.56074402e+02, + 4.59204681e+02, 4.62336609e+02, 4.65470001e+02, + 4.68604736e+02, 4.71740723e+02, 4.74877838e+02, + 4.78015930e+02, 4.81154907e+02, 4.84294617e+02, + 4.87434967e+02, 4.90575775e+02, 4.93716980e+02, + 4.96858429e+02, 5.00000000e+02, 5.03141571e+02, + 5.06283020e+02, 5.09424225e+02, 5.12565063e+02, + 5.15705383e+02, 5.18845093e+02, 5.21984070e+02, + 5.25122131e+02, 5.28259277e+02, 5.31395264e+02, + 5.34530029e+02, 5.37663391e+02, 5.40795288e+02, + 5.43925598e+02, 5.47054138e+02, 5.50180847e+02, + 5.53305603e+02, 5.56428223e+02, 5.59548584e+02, + 5.62666626e+02, 5.65782166e+02, 5.68895142e+02, + 5.72005371e+02, 5.75112793e+02, 5.78217224e+02, + 5.81318604e+02, 5.84416748e+02, 5.87511536e+02, + 5.90602905e+02, 5.93690674e+02, 5.96774719e+02, + 5.99854980e+02, 6.02931274e+02, 6.06003540e+02, + 6.09071594e+02, 6.12135376e+02, 6.15194702e+02, + 6.18249512e+02, 6.21299622e+02, 6.24344971e+02, + 6.27385376e+02, 6.30420776e+02, 6.33450989e+02, + 6.36475952e+02, 6.39495544e+02, 6.42509644e+02, + 6.45518066e+02, 6.48520813e+02, 6.51517639e+02, + 6.54508484e+02, 6.57493286e+02, 6.60471802e+02, + 6.63444031e+02, 6.66409790e+02, 6.69368958e+02, + 6.72321472e+02, 6.75267151e+02, 6.78205933e+02, + 6.81137695e+02, 6.84062256e+02, 6.86979614e+02, + 6.89889526e+02, 6.92791992e+02, 6.95686829e+02, + 6.98573975e+02, 7.01453247e+02, 7.04324524e+02, + 7.07187805e+02, 7.10042847e+02, 7.12889648e+02, + 7.15728027e+02, 7.18557861e+02, 7.21379089e+02, + 7.24191589e+02, 7.26995239e+02, 7.29789917e+02, + 7.32575562e+02, 7.35351990e+02, 7.38119080e+02, + 7.40876831e+02, 7.43625061e+02, 7.46363647e+02, + 7.49092529e+02, 7.51811584e+02, 7.54520691e+02, + 7.57219788e+02, 7.59908691e+02, 7.62587341e+02, + 7.65255615e+02, 7.67913391e+02, 7.70560608e+02, + 7.73197144e+02, 7.75822937e+02, 7.78437805e+02, + 7.81041687e+02, 7.83634460e+02, 7.86216064e+02, + 7.88786377e+02, 7.91345215e+02, 7.93892639e+02, + 7.96428406e+02, 7.98952515e+02, 8.01464783e+02, + 8.03965149e+02, 8.06453552e+02, 8.08929810e+02, + 8.11393860e+02, 8.13845703e+02, 8.16285095e+02, + 8.18711975e+02, 8.21126343e+02, 8.23527954e+02, + 8.25916870e+02, 8.28292908e+02, 8.30655945e+02, + 8.33005920e+02, 8.35342773e+02, 8.37666382e+02, + 8.39976685e+02, 8.42273560e+02, 8.44556885e+02, + 8.46826660e+02, 8.49082703e+02, 8.51325012e+02, + 8.53553406e+02, 8.55767822e+02, 8.57968262e+02, + 8.60154541e+02, 8.62326538e+02, 8.64484314e+02, + 8.66627686e+02, 8.68756531e+02, 8.70870911e+02, + 8.72970581e+02, 8.75055542e+02, 8.77125671e+02, + 8.79180969e+02, 8.81221252e+02, 8.83246521e+02, + 8.85256592e+02, 8.87251526e+02, 8.89231140e+02, + 8.91195435e+02, 8.93144226e+02, 8.95077515e+02, + 8.96995178e+02, 8.98897217e+02, 9.00783508e+02, + 9.02653931e+02, 9.04508484e+02, 
9.06347107e+02, + 9.08169617e+02, 9.09976074e+02, 9.11766296e+02, + 9.13540283e+02, 9.15297974e+02, 9.17039246e+02, + 9.18764038e+02, 9.20472290e+02, 9.22163940e+02, + 9.23838989e+02, 9.25497253e+02, 9.27138733e+02, + 9.28763306e+02, 9.30371033e+02, 9.31961731e+02, + 9.33535339e+02, 9.35091858e+02, 9.36631226e+02, + 9.38153320e+02, 9.39658142e+02, 9.41145630e+02, + 9.42615662e+02, 9.44068237e+02, 9.45503235e+02, + 9.46920715e+02, 9.48320496e+02, 9.49702637e+02, + 9.51066956e+02, 9.52413513e+02, 9.53742188e+02, + 9.55052979e+02, 9.56345764e+02, 9.57620605e+02, + 9.58877319e+02, 9.60115906e+02, 9.61336365e+02, + 9.62538574e+02, 9.63722595e+02, 9.64888245e+02, + 9.66035583e+02, 9.67164490e+02, 9.68274963e+02, + 9.69366943e+02, 9.70440369e+02, 9.71495239e+02, + 9.72531555e+02, 9.73549133e+02, 9.74548096e+02, + 9.75528259e+02, 9.76489685e+02, 9.77432251e+02, + 9.78356018e+02, 9.79260864e+02, 9.80146851e+02, + 9.81013855e+02, 9.81861816e+02, 9.82690796e+02, + 9.83500732e+02, 9.84291565e+02, 9.85063293e+02, + 9.85815857e+02, 9.86549255e+02, 9.87263428e+02, + 9.87958374e+02, 9.88634033e+02, 9.89290466e+02, + 9.89927551e+02, 9.90545288e+02, 9.91143616e+02, + 9.91722595e+02, 9.92282166e+02, 9.92822327e+02, + 9.93342957e+02, 9.93844177e+02, 9.94325867e+02, + 9.94788086e+02, 9.95230713e+02, 9.95653809e+02, + 9.96057373e+02, 9.96441284e+02, 9.96805664e+02, + 9.97150391e+02, 9.97475525e+02, 9.97781006e+02, + 9.98066833e+02, 9.98332947e+02, 9.98579468e+02, + 9.98806274e+02, 9.99013367e+02, 9.99200745e+02, + 9.99368469e+02, 9.99516479e+02, 9.99644714e+02, + 9.99753296e+02, 9.99842102e+02, 9.99911194e+02, + 9.99960510e+02, 9.99990112e+02, 1.00000000e+03, + 9.99990112e+02, 9.99960510e+02, 9.99911194e+02, + 9.99842102e+02, 9.99753296e+02, 9.99644714e+02, + 9.99516479e+02, 9.99368469e+02, 9.99200745e+02, + 9.99013367e+02, 9.98806274e+02, 9.98579468e+02, + 9.98332947e+02, 9.98066833e+02, 9.97781006e+02, + 9.97475525e+02, 9.97150391e+02, 9.96805664e+02, + 9.96441284e+02, 9.96057373e+02, 9.95653809e+02, + 9.95230713e+02, 9.94788086e+02, 9.94325867e+02, + 9.93844177e+02, 9.93342957e+02, 9.92822327e+02, + 9.92282166e+02, 9.91722595e+02, 9.91143616e+02, + 9.90545288e+02, 9.89927551e+02, 9.89290466e+02, + 9.88634033e+02, 9.87958374e+02, 9.87263428e+02, + 9.86549255e+02, 9.85815857e+02, 9.85063293e+02, + 9.84291565e+02, 9.83500732e+02, 9.82690796e+02, + 9.81861816e+02, 9.81013855e+02, 9.80146851e+02, + 9.79260864e+02, 9.78356018e+02, 9.77432251e+02, + 9.76489685e+02, 9.75528259e+02, 9.74548096e+02, + 9.73549133e+02, 9.72531555e+02, 9.71495239e+02, + 9.70440369e+02, 9.69366943e+02, 9.68274963e+02, + 9.67164490e+02, 9.66035583e+02, 9.64888245e+02, + 9.63722595e+02, 9.62538574e+02, 9.61336365e+02, + 9.60115906e+02, 9.58877319e+02, 9.57620605e+02, + 9.56345764e+02, 9.55052979e+02, 9.53742188e+02, + 9.52413513e+02, 9.51066956e+02, 9.49702637e+02, + 9.48320496e+02, 9.46920715e+02, 9.45503235e+02, + 9.44068237e+02, 9.42615662e+02, 9.41145630e+02, + 9.39658142e+02, 9.38153320e+02, 9.36631226e+02, + 9.35091858e+02, 9.33535339e+02, 9.31961731e+02, + 9.30371033e+02, 9.28763306e+02, 9.27138733e+02, + 9.25497253e+02, 9.23838989e+02, 9.22163940e+02, + 9.20472290e+02, 9.18764038e+02, 9.17039246e+02, + 9.15297974e+02, 9.13540283e+02, 9.11766296e+02, + 9.09976074e+02, 9.08169617e+02, 9.06347107e+02, + 9.04508484e+02, 9.02653931e+02, 9.00783508e+02, + 8.98897217e+02, 8.96995178e+02, 8.95077515e+02, + 8.93144226e+02, 8.91195435e+02, 8.89231140e+02, + 8.87251526e+02, 8.85256592e+02, 8.83246521e+02, + 8.81221252e+02, 8.79180969e+02, 
8.77125671e+02, + 8.75055542e+02, 8.72970581e+02, 8.70870911e+02, + 8.68756531e+02, 8.66627686e+02, 8.64484314e+02, + 8.62326538e+02, 8.60154541e+02, 8.57968262e+02, + 8.55767822e+02, 8.53553406e+02, 8.51325012e+02, + 8.49082703e+02, 8.46826660e+02, 8.44556885e+02, + 8.42273560e+02, 8.39976685e+02, 8.37666382e+02, + 8.35342773e+02, 8.33005920e+02, 8.30655945e+02, + 8.28292908e+02, 8.25916870e+02, 8.23527954e+02, + 8.21126343e+02, 8.18711975e+02, 8.16285095e+02, + 8.13845703e+02, 8.11393860e+02, 8.08929810e+02, + 8.06453552e+02, 8.03965149e+02, 8.01464783e+02, + 7.98952515e+02, 7.96428406e+02, 7.93892639e+02, + 7.91345215e+02, 7.88786377e+02, 7.86216064e+02, + 7.83634460e+02, 7.81041687e+02, 7.78437805e+02, + 7.75822937e+02, 7.73197144e+02, 7.70560608e+02, + 7.67913391e+02, 7.65255615e+02, 7.62587341e+02, + 7.59908691e+02, 7.57219788e+02, 7.54520691e+02, + 7.51811584e+02, 7.49092529e+02, 7.46363647e+02, + 7.43625061e+02, 7.40876831e+02, 7.38119080e+02, + 7.35351990e+02, 7.32575562e+02, 7.29789917e+02, + 7.26995239e+02, 7.24191589e+02, 7.21379089e+02, + 7.18557861e+02, 7.15728027e+02, 7.12889648e+02, + 7.10042847e+02, 7.07187805e+02, 7.04324524e+02, + 7.01453247e+02, 6.98573975e+02, 6.95686829e+02, + 6.92791992e+02, 6.89889526e+02, 6.86979614e+02, + 6.84062256e+02, 6.81137695e+02, 6.78205933e+02, + 6.75267151e+02, 6.72321472e+02, 6.69368958e+02, + 6.66409790e+02, 6.63444031e+02, 6.60471802e+02, + 6.57493286e+02, 6.54508484e+02, 6.51517639e+02, + 6.48520813e+02, 6.45518066e+02, 6.42509644e+02, + 6.39495544e+02, 6.36475952e+02, 6.33450989e+02, + 6.30420776e+02, 6.27385376e+02, 6.24344971e+02, + 6.21299622e+02, 6.18249512e+02, 6.15194702e+02, + 6.12135376e+02, 6.09071594e+02, 6.06003540e+02, + 6.02931274e+02, 5.99854980e+02, 5.96774719e+02, + 5.93690674e+02, 5.90602905e+02, 5.87511536e+02, + 5.84416748e+02, 5.81318604e+02, 5.78217224e+02, + 5.75112793e+02, 5.72005371e+02, 5.68895142e+02, + 5.65782166e+02, 5.62666626e+02, 5.59548584e+02, + 5.56428223e+02, 5.53305603e+02, 5.50180847e+02, + 5.47054138e+02, 5.43925598e+02, 5.40795288e+02, + 5.37663391e+02, 5.34530029e+02, 5.31395264e+02, + 5.28259277e+02, 5.25122131e+02, 5.21984070e+02, + 5.18845093e+02, 5.15705383e+02, 5.12565063e+02, + 5.09424225e+02, 5.06283020e+02, 5.03141571e+02, + 5.00000000e+02, 4.96858429e+02, 4.93716980e+02, + 4.90575775e+02, 4.87434967e+02, 4.84294617e+02, + 4.81154907e+02, 4.78015930e+02, 4.74877838e+02, + 4.71740723e+02, 4.68604736e+02, 4.65470001e+02, + 4.62336609e+02, 4.59204681e+02, 4.56074402e+02, + 4.52945831e+02, 4.49819153e+02, 4.46694427e+02, + 4.43571808e+02, 4.40451416e+02, 4.37333374e+02, + 4.34217834e+02, 4.31104858e+02, 4.27994598e+02, + 4.24887207e+02, 4.21782776e+02, 4.18681427e+02, + 4.15583282e+02, 4.12488464e+02, 4.09397125e+02, + 4.06309357e+02, 4.03225281e+02, 4.00145020e+02, + 3.97068695e+02, 3.93996460e+02, 3.90928375e+02, + 3.87864624e+02, 3.84805298e+02, 3.81750488e+02, + 3.78700378e+02, 3.75655060e+02, 3.72614624e+02, + 3.69579254e+02, 3.66549011e+02, 3.63524017e+02, + 3.60504456e+02, 3.57490356e+02, 3.54481903e+02, + 3.51479218e+02, 3.48482361e+02, 3.45491516e+02, + 3.42506744e+02, 3.39528198e+02, 3.36556000e+02, + 3.33590240e+02, 3.30631042e+02, 3.27678528e+02, + 3.24732849e+02, 3.21794067e+02, 3.18862305e+02, + 3.15937714e+02, 3.13020386e+02, 3.10110443e+02, + 3.07208008e+02, 3.04313171e+02, 3.01426056e+02, + 2.98546783e+02, 2.95675476e+02, 2.92812195e+02, + 2.89957123e+02, 2.87110352e+02, 2.84271973e+02, + 2.81442108e+02, 2.78620880e+02, 2.75808380e+02, + 2.73004761e+02, 2.70210083e+02, 
2.67424469e+02, + 2.64648041e+02, 2.61880890e+02, 2.59123169e+02, + 2.56374939e+02, 2.53636322e+02, 2.50907440e+02, + 2.48188400e+02, 2.45479294e+02, 2.42780228e+02, + 2.40091324e+02, 2.37412689e+02, 2.34744415e+02, + 2.32086609e+02, 2.29439377e+02, 2.26802826e+02, + 2.24177063e+02, 2.21562195e+02, 2.18958313e+02, + 2.16365524e+02, 2.13783936e+02, 2.11213654e+02, + 2.08654755e+02, 2.06107376e+02, 2.03571594e+02, + 2.01047516e+02, 1.98535233e+02], dtype=np.float32), +'ASAM.M.SCALAR.UWORD.IDENTICAL.BITMASK_0008': np.array([0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, + 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, + 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, + 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, + 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, + 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, + 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, + 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, + 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, + 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, + 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, + 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, + 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, + 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, + 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, + 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, + 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, + 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, + 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, + 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, + 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, + 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, + 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, + 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, + 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, + 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1], dtype=np.uint8), +'ASAM.M.VIRTUAL.SCALAR.SWORD.PHYSICAL': np.array([ -544, -464, -384, -304, -224, -144, -64, 16, 96, + 176, 256, 336, 416, 496, 576, 656, 736, 816, + 896, 976, -992, -912, -832, -752, -672, -592, -512, + -432, -352, -272, 0, 80, 160, 240, 320, 
400, + 480, 560, 640, 720, 800, 880, 960, -1008, -928, + -848, -768, -688, -608, -528, -448, -368, -288, -208, + -128, -48, 32, 112, 192, 272, 352, 432, 512, + 592, 672, 752, 832, 912, 992, -976, -896, -816, + -736, -656, -576, -496, -416, -336, -256, -176, -96, + -16, 64, 144, 224, 304, 384, 464, 544, 624], dtype=np.int16),
+}
+
+# synthetic channels used by the array / structured-signal tests
+CHANNELS_ARRAY = {
+    'Int32': np.array(range(-20, 20), dtype=np.int32),
+    'Float64': np.array(range(-20, 20), dtype=np.float64) * 0.25,
+    'Uint8': np.array(range(0, 80, 2), dtype=np.uint8),
+    'Uint64': np.ones(40, dtype=np.uint64) * 2**40,
+    'XAxis': np.array([0, 1, 2] * 40, np.uint8).reshape(40, 3),
+    'YAxis': np.array([0, 1, 2, 3] * 40, np.uint8).reshape(40, 4),
+    'Saw': np.arange(0, 1200, 10, dtype=np.uint16) % 0xFF,
+    'Ones': np.ones(120, dtype=np.int8),
+    'Cos': np.cos(np.arange(0, 360, 3, dtype=np.float32) / 360 * 2 * np.pi, dtype=np.float32).astype(np.float64),
+    'Sin': np.sin(np.arange(0, 360, 3, dtype=np.float32) / 360 * 2 * np.pi, dtype=np.float32).astype(np.float64),
+    'Zeros': np.zeros(120, dtype=np.int32),
+}
+
+# override a single sample of the Sin channel
+CHANNELS_ARRAY['Sin'][70] = -0.5
+
+# structured (record) array: 40 samples of a 3x4 int16 matrix together
+# with its X and Y axis channels
+l = []
+for i in range(40):
+    l.append([i, ] * 12)
+arrays = [
+    np.array(l, np.int16).reshape(40, 3, 4),
+    CHANNELS_ARRAY['XAxis'],
+    CHANNELS_ARRAY['YAxis'],
+]
+types = [
+    ('Int16Array', np.int16, (3, 4)),
+    ('XAxis', np.uint8, (3,)),
+    ('YAxis', np.uint8, (4,)),
+]
+CHANNELS_ARRAY['Int16Array'] = np.core.records.fromarrays(
+    arrays,
+    dtype=np.dtype(types),
+)
+
+# re-wrap the axes themselves as single-field record arrays
+arrays = [CHANNELS_ARRAY['XAxis'], ]
+types = [('XAxis', np.uint8, (3,)), ]
+CHANNELS_ARRAY['XAxis'] = np.core.records.fromarrays(
+    arrays,
+    dtype=np.dtype(types),
+)
+
+arrays = [CHANNELS_ARRAY['YAxis'], ]
+types = [('YAxis', np.uint8, (4,)), ]
+CHANNELS_ARRAY['YAxis'] = np.core.records.fromarrays(
+    arrays,
+    dtype=np.dtype(types),
+)
+
+# 120 samples of a 3x3 float32 matrix
+l = []
+for i in range(-60, 60):
+    l.append([0.1 * i, ] * 9)
+arrays = [np.array(l, np.float32).reshape(120, 3, 3), ]
+types = [('Matrix', np.float32, (3, 3)), ]
+CHANNELS_ARRAY['Matrix'] = np.core.records.fromarrays(
+    arrays,
+    dtype=np.dtype(types),
+)
+
+# composed record channels built from the scalar channels above
+arrays = [
+    CHANNELS_ARRAY['Saw'],
+    CHANNELS_ARRAY['Ones'],
+    CHANNELS_ARRAY['Cos'],
+    CHANNELS_ARRAY['Sin'],
+    CHANNELS_ARRAY['Zeros'],
+]
+types = [
+    ('Saw', np.uint16),
+    ('Ones', np.int8),
+    ('Cos', np.float64),
+    ('Sin', np.float64),
+    ('Zeros', np.int32),
+]
+CHANNELS_ARRAY['Maths'] = np.core.records.fromarrays(
+    arrays,
+    dtype=np.dtype(types),
+)
+
+arrays = [
+    CHANNELS_ARRAY['Int32'],
+    CHANNELS_ARRAY['Float64'],
+    CHANNELS_ARRAY['Uint8'],
+    CHANNELS_ARRAY['Uint64'],
+]
+types = [
+    ('Int32', np.int32),
+    ('Float64', np.float64),
+    ('Uint8', np.uint8),
+    ('Uint64', np.uint64),
+]
+CHANNELS_ARRAY['Composed'] = np.core.records.fromarrays(
+    arrays,
+    dtype=np.dtype(types),
+)
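+
+# A minimal usage sketch (editor's illustration, not part of the original
+# file): reference arrays like the ones above can be wrapped in Signal
+# objects, written to an MDF file and compared against what is read back.
+# The exact test flow shown here is an assumption:
+#
+#     from asammdf import MDF, Signal
+#     sig = Signal(samples=CHANNELS_ARRAY['Sin'],
+#                  timestamps=np.arange(len(CHANNELS_ARRAY['Sin']), dtype=np.float64),
+#                  name='Sin')
+#     mdf = MDF()
+#     mdf.append([sig])
+#     assert np.array_equal(mdf.get('Sin').samples, CHANNELS_ARRAY['Sin'])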
"ASAM.M.SCALAR.UBYTE.FORM_X_PLUS_4": "Scalar measurement", + "ASAM.M.SCALAR.SWORD.IDENTICAL": "Scalar measurement", + "ASAM.M.SCALAR.SLONG.IDENTICAL": "Scalar measurement", + "ASAM.M.SCALAR.SBYTE.LINEAR_MUL_2": "Scalar measurement", + "ASAM.M.SCALAR.UWORD.IDENTICAL.BITMASK_0FF0": "Scalar measurement with Bitmask for a bit-area", + "ASAM.M.SCALAR.UWORD.IDENTICAL.BITMASK_0008": "Scalar measurement with Bitmask for a single bit", + "ASAM.M.SCALAR.UWORD.IDENTICAL": "Scalar measurement", + "ASAM.M.SCALAR.ULONG.IDENTICAL": "Scalar measurement", + "ASAM.M.SCALAR.UBYTE.VTAB_RANGE_NO_DEFAULT_VALUE": "Scalar measurement with verbal conversion and default value", + "ASAM.M.SCALAR.UBYTE.VTAB_RANGE_DEFAULT_VALUE": "Scalar measurement with verbal conversion and default value", + "ASAM.M.SCALAR.UBYTE.TAB_VERB_NO_DEFAULT_VALUE": "Scalar measurement with verbal conversion", + "ASAM.M.SCALAR.UBYTE.TAB_VERB_DEFAULT_VALUE": "Scalar measurement with verbal conversion and default value", + "ASAM.M.SCALAR.UBYTE.TAB_NOINTP_NO_DEFAULT_VALUE": "Scalar measurement with verbal conversion and default value", + "ASAM.M.SCALAR.FLOAT64.IDENTICAL": "Scalar measurement", + "ASAM.M.SCALAR.FLOAT32.IDENTICAL": "Scalar measurement", + "$CalibrationLog": "", + "$ActiveCalibrationPage": "", +} + +UNITS = { + "ASAM.M.VIRTUAL.SCALAR.SWORD.PHYSICAL": "", + "ASAM_[0][0].M.MATRIX_DIM_8_2_1.UBYTE.IDENTICAL": "hours", + "ASAM_[0].M.MATRIX_DIM_16_1_1.UBYTE.IDENTICAL": "hours", + "ASAM_[0].M.ARRAY_SIZE_16.UBYTE.IDENTICAL": "hours", + "ASAM.M.SCALAR.UBYTE.TAB_NOINTP_DEFAULT_VALUE": "U/ min", + "ASAM.M.SCALAR.UBYTE.TAB_INTP_NO_DEFAULT_VALUE": "U/ min", + "ASAM.M.SCALAR.UBYTE.TAB_INTP_DEFAULT_VALUE": "U/ min", + "ASAM.M.SCALAR.UBYTE.IDENTICAL": "hours", + "ASAM.M.SCALAR.UBYTE.FORM_X_PLUS_4": "rpm", + "ASAM.M.SCALAR.SWORD.IDENTICAL": "hours", + "ASAM.M.SCALAR.SLONG.IDENTICAL": "hours", + "ASAM.M.SCALAR.SBYTE.LINEAR_MUL_2": "m/s", + "ASAM.M.SCALAR.UWORD.IDENTICAL.BITMASK_0FF0": "hours", + "ASAM.M.SCALAR.UWORD.IDENTICAL.BITMASK_0008": "hours", + "ASAM.M.SCALAR.UWORD.IDENTICAL": "hours", + "ASAM.M.SCALAR.ULONG.IDENTICAL": "hours", + "ASAM.M.SCALAR.UBYTE.VTAB_RANGE_NO_DEFAULT_VALUE": "", + "ASAM.M.SCALAR.UBYTE.VTAB_RANGE_DEFAULT_VALUE": "", + "ASAM.M.SCALAR.UBYTE.TAB_VERB_NO_DEFAULT_VALUE": "", + "ASAM.M.SCALAR.UBYTE.TAB_VERB_DEFAULT_VALUE": "", + "ASAM.M.SCALAR.UBYTE.TAB_NOINTP_NO_DEFAULT_VALUE": "U/ min", + "ASAM.M.SCALAR.FLOAT64.IDENTICAL": "hours", + "ASAM.M.SCALAR.FLOAT32.IDENTICAL": "hours", + "$CalibrationLog": "", + "$ActiveCalibrationPage": "", +} + def get_test_data(filename=""): """ Utility functions needed by all test scripts.