forked from ladisk/pyuff
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathpyuff.py
1565 lines (1439 loc) · 75.1 KB
/
pyuff.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Copyright (C) 2014-2017 Primož Čermelj, Matjaž Mršnik, Miha Pirnat, Janko Slavič, Blaž Starc (in alphabetic order)
#
# This file is part of pyuff.
#
# pyuff is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# pyuff is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyuff. If not, see <http://www.gnu.org/licenses/>.
"""
==========
pyuff module
==========
This module is part of the www.openmodal.com project and
defines an UFF class to manipulate with the
UFF (Universal File Format) files, i.e., to read from and write
to UFF files. Among the variety of UFF formats, only some of the
formats (data-set types) frequently used in structural dynamics
are supported: **151, 15, 55, 58, 58b, 82, 164.** Data-set **58b**
is actually a hybrid format [1]_ where the signal is written in the
binary form, while the header-part is slightly different from 58 but still in the
ascii format.
An UFF file is a file that can have many data-sets of either ascii or binary
data where data-set is a block of data between the start and end tags ``____-1``
(``_`` representing the space character). Refer to [1]_ and [2]_ for
more information about the UFF format.
This module also provides an exception handler class, ``UFFException``.
Sources:
.. [1] http://www.sdrl.uc.edu/uff/SDRChelp/LANG/English/unv_ug/book.htm
.. [2] Matlab's ``readuff`` and ``writeuff`` functions:
http://www.mathworks.com/matlabcentral/fileexchange/loadFile.do?objectId=6395
Acknowledgement:
* This source (py2.7) was first written in 2007, 2008 by Primoz Cermelj ([email protected])
* As part of the www.openmodal.com project the first source was adopted for Python 3 by
Matjaz Mrsnik <[email protected]>
Notes:
* 58 data-set is always written in double precision, even if it is
read in single precision.
* ``numpy`` module is required as all the vector/matrix-type data are read
or written using ``numpy.array`` objects.
To do:
* search for ``??`` in the source and check what is missing or
what should be changed
Performance:
* To read all the sets from a certain UFF file can take a while if the file
is large and, especially, if the file contains mostly ASCII-formatted data.
Therefore, for large files (files having many data-sets) it is recommended
that the data-sets are written in binary format (if available) - this
mainly applies to 58-type data-set.
* For example, to read a 85 MB file of 160 58 data-sets (in ASCII format) it
took 15 secs on the AMD 4200 Dual Core (to compare with the Matlab's
implementation [2]_ where it took 13 secs). But, when reading 4093
data-sets from a 110 MB file, entirely in 58b format (binary), it took
only 8.5 secs (the Matlab's implementations took 20 secs).
Example:
>>> import pyuff
>>> uff_file = pyuff.UFF('beam.uff')
>>> uff_file.file_exists()
True
"""
import os, sys, struct
import string
import time
import numpy as np
__version__ = '1.17'
_SUPPORTED_SETS = ['151', '15', '55', '58', '58b', '82', '164', '2411', '2412', '2420']
class UFFException(Exception):
    """Exception raised for UFF-related errors.

    Parameters
    ----------
    value : str
        Human-readable description of the error.  It is also exposed as
        the ``value`` attribute because existing callers (e.g.
        ``read_sets``) access ``msg.value`` directly.
    """
    def __init__(self, value):
        # Pass the message to Exception.__init__ so that ``args``,
        # ``repr`` and pickling behave like a normal exception (the
        # original stored the message only on ``self.value``).
        super().__init__(value)
        self.value = value

    def __str__(self):
        return self.value
class UFF:
"""
Manages data reading and writing from/to the UFF file.
The UFF class instance requires exactly 1 parameter - a file name of a
universal file. If the file does not exist, no basic file info will be
extracted and the status will be False - indicating that the file is not
refreshed. However, when one tries to read one or more data-sets, the file
must exist or the UFFException will be raised.
The file, given as a parameter to the UFF instance, is open only when
reading from or writing to the file. The UFF instance refreshes the file
automatically - use ``UFF.get_status()`` to see the refresh status); note
that this works fine if the file is being changed only through the UFF
instance and not by other functions or even by other means, e.g.,
externally. If the file is changed externally, the ``UFF.refresh()`` should
be invoked before any reading or writing.
All array-type data are read/written using numpy's ``np.array`` module.
Appendix
--------
Below are the fields for all the data sets supported. <..> designates an
*optional* field, i.e., a field that is not needed when writing to a file
as the field has a default value. Additionally, <<..>> designates fields that
are *ignored* when writing (not needed at all); these fields are defined
automatically.
Moreover, there are also some fields that are data-type dependent, i.e.,
some fields that are only used/available for some specific data-type. E.g.,
see ``modal_damp_vis`` at Data-set 55.
**Data-set 15 (points data)**:
* ``'type'`` -- *type number = 15*
* ``'node_nums'`` -- *list of n node numbers*
* ``'x'`` -- *x-coordinates of the n nodes*
* ``'y'`` -- *y-coordinates of the n nodes*
* ``'z'`` -- *z-coordinates of the n nodes*
* ``<'def_cs'>`` -- *n deformation cs numbers*
* ``<'disp_cs'>`` -- *n displacement cs numbers*
* ``<'color'>`` -- *n color numbers*
**Data-set 82 (line data)**:
* ``'type'`` -- *type number = 82*
* ``'trace_num'`` -- *number of the trace*
* ``'lines'`` -- *list of n line numbers*
* ``<'id'>`` -- *id string*
* ``<'color'>`` -- *color number*
* ``<<'n_nodes'>>`` -- *number of nodes*
**Data-set 151 (header data)**:
* ``'type'`` -- *type number = 151*
* ``'model_name'`` -- *name of the model*
* ``'description'`` -- *description of the model*
* ``'db_app'`` -- *name of the application that
created database*
* ``'program'`` -- *name of the program*
* ``'model_name'`` -- *the name of the model*
* ``<'version_db1'>`` -- *version string 1 of the database*
* ``<'version_db2'>`` -- *version string 2 of the database*
* ``<'file_type'>`` -- *file type string*
* ``<'date_db_created'>`` -- *date database was created*
* ``<'time_db_created'>`` -- *time database was created*
* ``<'date_db_saved'>`` -- *date database was saved*
* ``<'time_db_saved'>`` -- *time database was saved*
* ``<'date_file_written'>``-- *date file was written*
* ``<'time_file_written'>``-- *time file was written*
**Data-set 164 (units)**:
* ``'type'`` -- *type number = 164*
* ``'units_code'`` -- *units code number*
* ``'length'`` -- *length factor*
* ``'force'`` -- *force factor*
* ``'temp'`` -- *temperature factor*
* ``'temp_offset'`` -- *temperature-offset factor*
* ``<'units_description'>``-- *units description*
* ``<'temp_mode'>`` -- *temperature mode number*
**Data-set 58<b> (function at nodal DOF)**:
* ``'type'`` -- *type number = 58*
* ``'func_type'`` -- *function type; only 1, 2, 3, 4
and 6 are supported*
* ``'rsp_node'`` -- *response node number*
* ``'rsp_dir'`` -- *response direction number*
* ``'ref_node'`` -- *reference node number*
* ``'ref_dir'`` -- *reference direction number*
* ``'data'`` -- *data array*
* ``'x'`` -- *abscissa array*
* ``<'binary'>`` -- *1 for binary, 0 for ascii*
* ``<'id1'>`` -- *id string 1*
* ``<'id2'>`` -- *id string 2*
* ``<'id3'>`` -- *id string 3*
* ``<'id4'>`` -- *id string 4*
* ``<'id5'>`` -- *id string 5*
* ``<'load_case_id'>`` -- *id number for the load case*
* ``<'rsp_ent_name'>`` -- *entity name for the response*
* ``<'ref_ent_name'>`` -- *entity name for the reference*
* ``<'abscissa_axis_units_lab'>``-- *label for the units on the abscissa*
* ``<'abscissa_len_unit_exp'>`` -- *exp for the length unit on the abscissa*
* ``<'abscissa_force_unit_exp'>``-- *exp for the force unit on the abscissa*
* ``<'abscissa_temp_unit_exp'>`` -- *exp for the temperature unit on the abscissa*
* ``<'ordinate_axis_units_lab'>``-- *label for the units on the ordinate*
* ``<'ordinate_len_unit_exp'>`` -- *exp for the length unit on the ordinate*
* ``<'ordinate_force_unit_exp'>``-- *exp for the force unit on the ordinate*
* ``<'ordinate_temp_unit_exp'>`` -- *exp for the temperature unit on the ordinate*
* ``<'orddenom_axis_units_lab'>``-- *label for the units on the ordinate denominator*
* ``<'orddenom_len_unit_exp'>`` -- *exp for the length unit on the ordinate denominator*
* ``<'orddenom_force_unit_exp'>``-- *exp for the force unit on the ordinate denominator*
* ``<'orddenom_temp_unit_exp'>`` -- *exp for the temperature unit on the ordinate denominator*
* ``<'z_axis_axis_units_lab'>`` -- *label for the units on the z axis*
* ``<'z_axis_len_unit_exp'>`` -- *exp for the length unit on the z axis*
* ``<'z_axis_force_unit_exp'>`` -- *exp for the force unit on the z axis*
* ``<'z_axis_temp_unit_exp'>`` -- *exp for the temperature unit on the z axis*
* ``<'z_axis_value'>`` -- *z axis value*
* ``<'spec_data_type'>`` -- *specific data type*
* ``<'abscissa_spec_data_type'>``-- *abscissa specific data type*
* ``<'ordinate_spec_data_type'>``-- *ordinate specific data type*
* ``<'orddenom_spec_data_type'>``-- *ordinate denominator specific data type*
* ``<'z_axis_spec_data_type'>`` -- *z-axis specific data type*
* ``<'ver_num'>`` -- *version number*
* ``<<'ord_data_type'>>`` -- *ordinate data type*
* ``<<'abscissa_min'>>`` -- *abscissa minimum*
* ``<<'byte_ordering'>>`` -- *byte ordering*
* ``<<'fp_format'>>`` -- *floating-point format*
* ``<<'n_ascii_lines'>>`` -- *number of ascii lines*
* ``<<'n_bytes'>>`` -- *number of bytes*
* ``<<'num_pts'>>`` -- *number of data pairs for
uneven abscissa or number of data values for even abscissa*
* ``<<'abscissa_spacing'>>`` -- *abscissa spacing; 0=uneven,
1=even*
* ``<<'abscissa_inc'>>`` -- *abscissa increment; 0 if
spacing uneven*
**Data-set 55 (data at nodes)**:
* ``'type'`` -- *type number = 55*
* ``'analysis_type'`` -- *analysis type number; currently
only only normal mode (2), complex eigenvalue first order
(displacement) (3), frequency response and (5) and complex eigenvalue
second order (velocity) (7) are supported*
* ``'data_ch'`` -- *data-characteristic number*
* ``'spec_data_type'`` -- *specific data type number*
* ``'load_case'`` -- *load case number*
* ``'mode_n'`` -- *mode number; applicable to
analysis types 2, 3 and 7 only*
* ``'eig'`` -- *eigen frequency (complex number);
applicable to analysis types 3 and 7 only*
* ``'freq'`` -- *frequency (Hz); applicable to
analysis types 2 and 5 only*
* ``'freq_step_n'`` -- *frequency step number; applicable
to analysis type 5 only*
* ``'node_nums'`` -- *node numbers*
* ``'r1'..'r6'`` -- *response array for each DOF; when
response is complex only r1 through r3 will be used*
* ``<'id1'>`` -- *id1 string*
* ``<'id2'>`` -- *id2 string*
* ``<'id3'>`` -- *id3 string*
* ``<'id4'>`` -- *id4 string*
* ``<'id5'>`` -- *id5 string*
* ``<'model_type'>`` -- *model type number*
* ``<'modal_m'>`` -- *modal mass; applicable to
analysis type 2 only*
* ``<'modal_damp_vis'>`` -- *modal viscous damping ratio;
applicable to analysis type 2 only*
* ``<'modal_damp_his'>`` -- *modal hysteretic damping ratio;
applicable to analysis type 2 only*
* ``<'modal_b'>`` -- *modal-b (complex number);
applicable to analysis types 3 and 7 only*
* ``<'modal_a'>`` -- *modal-a (complex number);
applicable to analysis types 3 and 7 only*
* ``<<'n_data_per_node'>>``-- *number of data per node (DOFs)*
* ``<<'data_type'>>`` -- *data type number; 2 = real data,
5 = complex data*
"""
def __init__(self, fileName):
    """
    Bind the UFF object to ``fileName`` and extract the basic info:
    the number of data-sets, their types and their formats (ascii or
    binary).  Call ``refresh`` to re-extract this info manually.
    Whenever some data is written to the file, a flag is cleared so
    that, before any subsequent reading, the file info is refreshed
    automatically (when needed).
    """
    # "Private" state describing the currently known file layout.
    self._fileName = fileName
    self._refreshed = False          # True once the file info is up to date
    self._nSets = 0                  # number of data-sets found in the file
    self._blockInd = []              # start/end offset pairs, one row per data-set
    self._setTypes = np.array(())    # data-set type numbers
    self._setFormats = np.array(())  # data-set formats (0=ascii, 1=binary)
    # Populate the state from the file (if it exists).
    self.refresh()
def get_supported_sets(self):
    """Return the list of data-set types supported for reading and writing."""
    return _SUPPORTED_SETS
def get_n_sets(self):
    """Return the number of valid data-sets found in the file."""
    if not self._refreshed:
        self.refresh()
    return self._nSets
def get_set_types(self):
    """Return an array with the type number of every valid data-set.

    All valid data-sets are listed, including unsupported ones whose
    contents cannot be read by this class.
    """
    if not self._refreshed:
        self.refresh()
    return self._setTypes
def get_set_formats(self):
    """Return an array of data-set formats: 0=ascii, 1=binary."""
    if not self._refreshed:
        self.refresh()
    return self._setFormats
def get_file_name(self):
    """Return the name of the file this UFF object is bound to."""
    return self._fileName
def file_exists(self):
    """
    Return True if the associated file exists and False otherwise.
    When the file does not exist, invoking one of the read methods
    raises UFFException.
    """
    return os.path.exists(self._fileName)
def get_status(self):
    """Return the refresh status: True if the file info is up to date."""
    return self._refreshed
def refresh(self):
    """
    Extract/refresh the info of all the data-sets from the UFF file
    (if the file exists).  The file must exist and be accessible,
    otherwise an error is raised.  Returns True if the info was
    refreshed and False otherwise (e.g. the file does not exist).

    Raises
    ------
    UFFException
        If the file cannot be opened or parsed.
    """
    self._refreshed = False
    if not self.file_exists():
        return False  # cannot read the file if it does not exist
    try:
        fh = open(self._fileName, 'rb')
    except:
        raise UFFException('Cannot access the file %s' % self._fileName)
    else:
        try:
            # Parse the entire file for the 6-character '    -1'
            # delimiter tags (what '%6i' % -1 writes) and collect the
            # offset of each occurrence.
            data = fh.read()
            ind = -1
            blockInd = []
            while True:
                ind = data.find(b'    -1', ind + 1)
                if ind == -1:
                    break
                blockInd.append(ind)
            blockInd = np.asarray(blockInd, dtype='int')
            # Construct start/end offset pairs; each pair points to the
            # start and end of one data-set (block).  The starting
            # '    -1' tag is included while the ending one is excluded.
            nBlocks = int(np.floor(len(blockInd) / 2.0))
            if nBlocks == 0:
                # No valid blocks found but the file is still considered
                # being refreshed
                fh.close()
                self._refreshed = True
                return self._refreshed
            self._blockInd = np.zeros((nBlocks, 2), dtype='int')
            self._blockInd[:, 0] = blockInd[:-1:2].copy()
            self._blockInd[:, 1] = blockInd[1::2].copy() - 1
            # Go through all the data-sets (blocks) and extract the
            # data-set type and whether the data-set is stored in
            # binary or ascii format.
            self._nSets = nBlocks
            self._setTypes = np.zeros(nBlocks)
            self._setFormats = np.zeros(nBlocks)
            for ii in range(0, self._nSets):
                si = self._blockInd[ii, 0]
                ei = self._blockInd[ii, 1]
                try:
                    blockData = data[si:ei + 1].splitlines()
                    self._setTypes[ii] = int(blockData[1][0:6])
                    # BUGFIX: ``data`` is bytes, so single-indexing
                    # (blockData[1][6]) yields an int whose .lower()
                    # raised AttributeError and was silently swallowed,
                    # leaving binary (e.g. 58b) sets undetected.  Slice
                    # one byte instead and compare bytes to bytes.
                    if blockData[1][6:7].lower() == b'b':
                        self._setFormats[ii] = 1
                except:
                    # Some non-valid blocks found; ignore the exception
                    pass
            del blockInd
        except:
            fh.close()
            raise UFFException('Error refreshing UFF file: ' + self._fileName)
        else:
            self._refreshed = True
            fh.close()
        return self._refreshed
def read_sets(self, setn=None):
    """
    Read data-sets from the file and return them as dset dictionaries.

    Parameters
    ----------
    setn : int or list of int, optional
        0-based indices of the data-sets to read.  When None (default)
        all the data-sets are read.  Unknown data-sets are returned
        empty.

    Returns a list of dset dictionaries - as many dictionaries as
    there are sets; when exactly one set is read, the dictionary
    itself is returned instead of a one-element list.
    The user must be sure that, since the last
    reading/writing/refreshing, the data has not been changed by some
    other means than through this UFF object.
    """
    if not self.file_exists():
        raise UFFException('Cannot read from a non-existing file: ' + self._fileName)
    # BUGFIX: the original called the non-existent ``self._refresh()``
    # (the method is named ``refresh``), raising AttributeError here.
    # Refresh *before* building the read range so self._nSets is current.
    if not self._refreshed:
        if not self.refresh():
            raise UFFException('Cannot read from the file: ' + self._fileName)
    if setn is None:  # 'is None' - '== None' misbehaves for array-likes
        readRange = range(0, self._nSets)
    elif isinstance(setn, list):
        readRange = setn
    else:
        readRange = [setn]
    dset = []
    try:
        for ii in readRange:
            dset.append(self._read_set(ii))
    except UFFException as msg:
        raise UFFException('Error when reading ' + str(ii) + '-th data-set: ' + msg.value)
    except:
        raise UFFException('Error when reading data-set(s)')
    if len(dset) == 1:
        dset = dset[0]
    return dset
## def read_all_sets(self):
## """Reads all the sets from UFF file.
## The method returns a list of dsets each containing as many
## dictionaries as there are valid sets found in the file.
## """
## dset = []
## if not self.file_exists():
## raise UFFException('Cannot read from a non-existing file: '+self._fileName)
## if not self._refreshed:
## if not self._refresh():
## raise UFFException('Cannot read from the file: '+self._fileName)
## try:
## for ii in range(0,self._nSets):
## dset.append(self._read_set(ii))
## except UFFException,msg:
## raise UFFException('Error when reading '+str(ii)+'-th data-set: '+msg.value)
## except:
## raise UFFException('Error when reading '+str(ii)+'-th data-set')
## return dset
def write_sets(self, dsets, mode='add'):
    """
    Write several UFF data-sets to the file.

    Parameters
    ----------
    dsets : dict or list of dict
        One dictionary per data-set; a bare dictionary is accepted
        when only one data-set is to be written.  Unsupported
        data-sets are ignored.
    mode : str
        Either 'add' (default) or 'overwrite'.

    For each data-set there are some required and some optional fields
    in the dset dictionary; in general their sum can be less than the
    number of fields read back from the same data-set type because
    some fields are set automatically.  Optional fields are calculated
    automatically and, as dset is passed by reference, the update is
    reflected at the caller too.
    """
    if not isinstance(dsets, list):
        dsets = [dsets]
    nSets = len(dsets)
    if nSets < 1:
        raise UFFException('Nothing to write')
    if mode.lower() == 'overwrite':
        # overwrite mode: the first set truncates the file, the
        # remaining ones are appended
        self._write_set(dsets[0], 'overwrite')
        for ii in range(1, nSets):
            self._write_set(dsets[ii], 'add')
    elif mode.lower() == 'add':
        # add mode: every set is appended
        for dset in dsets:
            self._write_set(dset, 'add')
    else:
        raise UFFException('Unknown mode: ' + mode)
def _read_set(self, n):
    """
    Read the n-th data-set from the file (0 <= n <= nSets-1) and
    return it as a dset dictionary.  The user must be sure that, since
    the last reading/writing/refreshing, the data has not been changed
    by some other means than through this UFF object.
    """
    dset = {}
    if not self.file_exists():
        raise UFFException('Cannot read from a non-existing file: ' + self._fileName)
    if not self._refreshed:
        if not self.refresh():
            raise UFFException('Cannot read from the file: ' + self._fileName + '. The file cannot be refreshed.')
    if (n > self._nSets - 1) or (n < 0):
        raise UFFException('Cannot read data-set: ' + str(int(n)) +
                           '. Data-set number to high or to low.')
    # Read the n-th data-set (one block) as raw bytes
    try:
        fh = open(self._fileName, 'rb')
    except:
        raise UFFException('Cannot access the file: ' + self._fileName + ' to read from.')
    else:
        try:
            si = self._blockInd[n][0]  # start offset
            ei = self._blockInd[n][1]  # end offset
            fh.seek(si)
            if self._setTypes[int(n)] == 58:
                # keep bytes; decoding is handled later in _extract58
                blockData = fh.read(ei - si + 1)
            else:
                blockData = fh.read(ei - si + 1).decode('ascii')
        except:
            fh.close()
            # BUGFIX: the original concatenated str + int here, which
            # raised TypeError instead of the intended UFFException.
            raise UFFException('Error reading data-set #: ' + str(int(n)))
        else:
            fh.close()
    # Dispatch to the type-specific extraction routine
    setType = self._setTypes[int(n)]
    if setType == 15: dset = self._extract15(blockData)
    elif setType == 2411: dset = self._extract2411(blockData)  # TEMP ADD
    elif setType == 2412: dset = self._extract15(blockData)  # TEMP ADD
    elif setType == 18: dset = self._extract18(blockData)  # TEMP ADD
    elif setType == 82: dset = self._extract82(blockData)
    elif setType == 2420: dset = self._extract2420(blockData)
    elif setType == 151: dset = self._extract151(blockData)
    elif setType == 164: dset = self._extract164(blockData)
    elif setType == 55: dset = self._extract55(blockData)
    elif setType == 58: dset = self._extract58(blockData)
    else:
        # Unsupported data-set - report only its type
        dset['type'] = setType
    return dset
def _write_set(self, dset, mode='add'):
    """
    Write a single UFF data-set (a dset dictionary) to the file.
    ``mode`` is either 'add' (append, default) or 'overwrite'.
    Unsupported data-set types are silently ignored.

    For each data-set there are some required and some optional fields
    in the dset dictionary; optional fields are calculated
    automatically and, as dset is passed by reference, the update is
    reflected at the caller too.
    """
    if mode.lower() == 'overwrite':
        # overwrite mode - truncate the file
        try:
            fh = open(self._fileName, 'wt')
        except:
            raise UFFException('Cannot access the file: ' + self._fileName + ' to write to.')
    elif mode.lower() == 'add':
        # add (append) mode
        try:
            fh = open(self._fileName, 'at')
        except:
            raise UFFException('Cannot access the file: ' + self._fileName + ' to write to.')
    else:
        raise UFFException('Unknown mode: ' + mode)
    try:
        # Actual writing
        try:
            setType = dset['type']
        except KeyError:  # narrowed from bare except: only a missing key is expected
            fh.close()
            raise UFFException('Data-set\'s dictionary is missing the required \'type\' key')
        # replace nan/inf values, which the UFF format cannot represent
        if 'data' in dset:
            dset['data'] = np.nan_to_num(dset['data'])
        if setType == 15: self._write15(fh, dset)
        elif setType == 82: self._write82(fh, dset)
        elif setType == 151: self._write151(fh, dset)
        elif setType == 164: self._write164(fh, dset)
        elif setType == 55: self._write55(fh, dset)
        elif setType == 58: self._write58(fh, dset, mode)
        elif setType == 2411: self._write2411(fh, dset)
        elif setType == 2420: self._write2420(fh, dset)
        else:
            # Unsupported data-set - do nothing
            pass
    except:
        fh.close()
        raise  # re-raise the last exception
    else:
        fh.close()
    # the file has changed, so the cached info must be rebuilt
    self.refresh()
def _write15(self, fh, dset):
    """Write coordinate data - data-set 15 - to the open file ``fh``.

    Required keys: 'node_nums', 'x', 'y', 'z'.
    Optional keys (default to zeros): 'def_cs', 'disp_cs', 'color'.
    """
    try:
        n = len(dset['node_nums'])
        # fill in the optional per-node fields with zeros
        # (np.zeros replaces the original list-comprehension asarray)
        dset = self._opt_fields(dset, {'def_cs': np.zeros(n, dtype='i'),
                                       'disp_cs': np.zeros(n, dtype='i'),
                                       'color': np.zeros(n, dtype='i')})
        # data-set header, then one record per node
        fh.write('%6i\n%6i%74s\n' % (-1, 15, ' '))
        for ii in range(0, n):
            fh.write('%10i%10i%10i%10i%13.4e%13.4e%13.4e\n' % (
                dset['node_nums'][ii], dset['def_cs'][ii], dset['disp_cs'][ii], dset['color'][ii],
                dset['x'][ii], dset['y'][ii], dset['z'][ii]))
        fh.write('%6i\n' % -1)
    except KeyError as msg:
        raise UFFException('The required key \'' + msg.args[0] + '\' not present when writing data-set #15')
    except:
        raise UFFException('Error writing data-set #15')
def _write82(self, fh, dset):
    """Write line (trace) data - data-set 82 - to the open file ``fh``.

    Required keys: 'trace_num', 'nodes'.
    Optional keys: 'id' (default 'NONE'), 'color' (default 0).
    """
    try:
        # fill in defaults for the optional fields
        dset = self._opt_fields(dset, {'id': 'NONE', 'color': 0})
        # the node count in the header is the number of *changes* of
        # node along the trace, not the raw length of dset['nodes']
        nNodes = np.sum((dset['nodes'][1:] - dset['nodes'][:-1]) != 0) + 1
        fh.write('%6i\n%6i%74s\n' % (-1, 82, ' '))
        fh.write('%10i%10i%10i\n' % (dset['trace_num'], nNodes, dset['color']))
        fh.write('%-80s\n' % dset['id'])
        # node numbers: eight 10-wide integers per line, remainder on
        # a final line
        pos = 0
        for _ in range(nNodes // 8):
            fh.write(''.join('%10i' % node for node in dset['nodes'][pos:pos + 8]) + '\n')
            pos += 8
        if nNodes % 8 > 0:
            fh.write(''.join('%10i' % node for node in dset['nodes'][pos:]) + '\n')
        fh.write('%6i\n' % -1)
    except KeyError as msg:
        raise UFFException('The required key \'' + msg.args[0] + '\' not present when writing data-set #82')
    except:
        raise UFFException('Error writing data-set #82')
def _write151(self, fh, dset):
    """Write header data - data-set 151 - to the open file ``fh``.

    Required keys: 'model_name', 'description', 'db_app', 'program'.
    The various date/time and version fields are optional and default
    to the current date/time and '0'.
    """
    try:
        date_now = time.strftime('%d-%b-%y', time.localtime())
        time_now = time.strftime('%H:%M:%S', time.localtime())
        # fill in defaults for the optional fields
        dset = self._opt_fields(dset, {'version_db1': '0',
                                       'version_db2': '0',
                                       'file_type': '0',
                                       'date_db_created': date_now,
                                       'time_db_created': time_now,
                                       'date_db_saved': date_now,
                                       'time_db_saved': time_now,
                                       'date_file_written': date_now,
                                       'time_file_written': time_now})
        # data-set header followed by the header records
        fh.write('%6i\n%6i%74s\n' % (-1, 151, ' '))
        fh.write('%-80s\n' % dset['model_name'])
        fh.write('%-80s\n' % dset['description'])
        fh.write('%-80s\n' % dset['db_app'])
        fh.write('%-10s%-10s%10s%10s%10s\n' % (
            dset['date_db_created'], dset['time_db_created'],
            dset['version_db1'], dset['version_db2'], dset['file_type']))
        fh.write('%-10s%-10s\n' % (dset['date_db_saved'], dset['time_db_saved']))
        fh.write('%-80s\n' % dset['program'])
        fh.write('%-10s%-10s\n' % (dset['date_file_written'], dset['time_file_written']))
        fh.write('%6i\n' % -1)
    except KeyError as msg:
        raise UFFException('The required key \'' + msg.args[0] + '\' not present when writing data-set #151')
    except:
        raise UFFException('Error writing data-set #151')
def _write164(self, fh, dset):
    """Write units data - data-set 164 - to the open file ``fh``.

    Required keys: 'units_code', 'length', 'force', 'temp',
    'temp_offset'.  Optional keys: 'units_description', 'temp_mode'.
    """
    try:
        # handle optional fields
        dset = self._opt_fields(dset, {'units_description': 'User unit system',
                                       'temp_mode': 1})
        # data-set header and units record
        fh.write('%6i\n%6i%74s\n' % (-1, 164, ' '))
        fh.write('%10i%20s%10i\n' % (dset['units_code'], dset['units_description'], dset['temp_mode']))
        # UFF stores doubles with Fortran-style 'D' exponents.
        # (Also renamed the local: the original shadowed builtin ``str``.)
        factors = '%25.16e%25.16e%25.16e\n%25.16e\n' % (
            dset['length'], dset['force'], dset['temp'], dset['temp_offset'])
        fh.write(factors.replace('e+', 'D+').replace('e-', 'D-'))
        fh.write('%6i\n' % -1)
    except KeyError as msg:
        raise UFFException('The required key \'' + msg.args[0] + '\' not present when writing data-set #164')
    except:
        raise UFFException('Error writing data-set #164')
def _write55(self, fh, dset):
    """Write data at nodes - data-set 55 - to the open file ``fh``.

    Supported analysis types:
      - normal mode (2)
      - complex eigenvalue, first order / displacement (3)
      - frequency response (5)
      - complex eigenvalue, second order / velocity (7)
    """
    try:
        # Handle general optional fields
        dset = self._opt_fields(dset,
                                {'units_description': ' ',
                                 'id1': 'NONE',
                                 'id2': 'NONE',
                                 'id3': 'NONE',
                                 'id4': 'NONE',
                                 'id5': 'NONE',
                                 'model_type': 1})
        # ... and some analysis-type specific optional fields
        if dset['analysis_type'] == 2:
            # normal modes
            dset = self._opt_fields(dset,
                                    {'modal_m': 0,
                                     'modal_damp_vis': 0,
                                     'modal_damp_his': 0})
        elif dset['analysis_type'] in (3, 7):
            # complex modes; modal_a/modal_b must be written as complex
            dset = self._opt_fields(dset,
                                    {'modal_b': 0.0 + 0.0j,
                                     'modal_a': 0.0 + 0.0j})
            if not np.iscomplexobj(dset['modal_a']):
                dset['modal_a'] = dset['modal_a'] + 0.j
            if not np.iscomplexobj(dset['modal_b']):
                dset['modal_b'] = dset['modal_b'] + 0.j
        elif dset['analysis_type'] == 5:
            # frequency response
            pass
        else:
            # unsupported analysis type
            raise UFFException('Error writing data-set #55: unsupported analysis type')
        # Determine data type (2=real, 5=complex) and DOFs per node.
        # BUGFIX: nDataPerNode must default to 3; the original left it
        # undefined for real-valued 3-DOF data (no r4/r5/r6, real r1),
        # causing a NameError that the broad except below masked as
        # 'Error writing data-set #55'.
        dataType = 2
        nDataPerNode = 3
        if ('r4' in dset) and ('r5' in dset) and ('r6' in dset):
            nDataPerNode = 6
        if np.iscomplexobj(dset['r1']):
            nDataPerNode = 3
            dataType = 5
        # Write the header records
        fh.write('%6i\n%6i%74s\n' % (-1, 55, ' '))
        fh.write('%-80s\n' % dset['id1'])
        fh.write('%-80s\n' % dset['id2'])
        fh.write('%-80s\n' % dset['id3'])
        fh.write('%-80s\n' % dset['id4'])
        fh.write('%-80s\n' % dset['id5'])
        fh.write('%10i%10i%10i%10i%10i%10i\n' %
                 (dset['model_type'], dset['analysis_type'], dset['data_ch'],
                  dset['spec_data_type'], dataType, nDataPerNode))
        if dset['analysis_type'] == 2:
            # Normal modes
            fh.write('%10i%10i%10i%10i\n' % (2, 4, dset['load_case'], dset['mode_n']))
            fh.write('%13.4e%13.4e%13.4e%13.4e\n' % (dset['freq'], dset['modal_m'],
                     dset['modal_damp_vis'], dset['modal_damp_his']))
        elif dset['analysis_type'] == 5:
            # Frequency response
            fh.write('%10i%10i%10i%10i\n' % (2, 1, dset['load_case'], dset['freq_step_n']))
            fh.write('%13.4e\n' % dset['freq'])
        elif (dset['analysis_type'] == 3) or (dset['analysis_type'] == 7):
            # Complex modes
            fh.write('%10i%10i%10i%10i\n' % (2, 6, dset['load_case'], dset['mode_n']))
            fh.write('%13.4e%13.4e%13.4e%13.4e%13.4e%13.4e\n' % (
                dset['eig'].real, dset['eig'].imag, dset['modal_a'].real, dset['modal_a'].imag,
                dset['modal_b'].real, dset['modal_b'].imag))
        else:
            raise UFFException('Unsupported analysis type')
        n = len(dset['node_nums'])
        if dataType == 2:
            # Real data
            if nDataPerNode == 3:
                for k in range(0, n):
                    fh.write('%10i\n' % dset['node_nums'][k])
                    fh.write('%13.4e%13.4e%13.4e\n' % (dset['r1'][k], dset['r2'][k], dset['r3'][k]))
            else:
                for k in range(0, n):
                    fh.write('%10i\n' % dset['node_nums'][k])
                    fh.write('%13.4e%13.4e%13.4e%13.4e%13.4e%13.4e\n' %
                             (dset['r1'][k], dset['r2'][k], dset['r3'][k], dset['r4'][k], dset['r5'][k], dset['r6'][k]))
        elif dataType == 5:
            # Complex data; n_data_per_node is assumed being 3
            for k in range(0, n):
                fh.write('%10i\n' % dset['node_nums'][k])
                fh.write('%13.4e%13.4e%13.4e%13.4e%13.4e%13.4e\n' %
                         (dset['r1'][k].real, dset['r1'][k].imag, dset['r2'][k].real, dset['r2'][k].imag, dset['r3'][k].real, dset['r3'][k].imag))
        else:
            raise UFFException('Unsupported data type')
        fh.write('%6i\n' % -1)
    except KeyError as msg:
        raise UFFException('The required key \'' + msg.args[0] + '\' not present when writing data-set #55')
    except:
        raise UFFException('Error writing data-set #55')
def _write58(self,fh,dset, mode='add'):
# Writes function at nodal DOF - data-set 58 - to an open file fh.
try:
if not (dset['func_type'] in [1,2,3,4,6]):
raise UFFException('Unsupported function type')
# handle optional fields - only those that are not calculated
# automatically
dict = {'units_description':'',\
'id1':'NONE',\
'id2':'NONE',\
'id3':'NONE',\
'id4':'NONE',\
'id5':'NONE',\
'func_id':0,\
'ver_num':0,\
'binary':0,\
'load_case_id':0,\
'rsp_ent_name':'NONE',\
'ref_ent_name':'NONE',\
'abscissa_axis_lab': 'NONE',\
'abscissa_axis_units_lab':'NONE',\
'abscissa_len_unit_exp':0,\
'abscissa_force_unit_exp':0,\
'abscissa_temp_unit_exp':0,\
'ordinate_len_unit_exp':0,\
'ordinate_force_unit_exp':0,\
'ordinate_temp_unit_exp':0,\
'ordinate_axis_lab':'NONE',\
'ordinate_axis_units_lab':'NONE',\
'orddenom_len_unit_exp':0,\
'orddenom_force_unit_exp':0,\
'orddenom_temp_unit_exp':0,\
'orddenom_axis_lab':'NONE',\
'orddenom_spec_data_type':'NONE',\
'orddenom_axis_units_lab':'NONE',\
'z_axis_len_unit_exp':0,\
'z_axis_force_unit_exp':0,\
'z_axis_temp_unit_exp':0,\
'z_axis_axis_lab':'NONE',\
'z_axis_axis_units_lab':'NONE',\
'z_axis_value':0,\
'spec_data_type':0,\
'abscissa_spec_data_type':0,\
'ordinate_spec_data_type':0,\
'orddenom_spec_data_type':0,\
'z_axis_spec_data_type':0,\
'version_num':0,
'abscissa_spacing':0}
dset = self._opt_fields(dset,dict)
# Write strings to the file - always in double precision => ord_data_type = 2
# for real data and 6 for complex data
numPts = len(dset['data'])
isR = not np.iscomplexobj(dset['data'])
if isR:
# real data
dset['ord_data_type'] = 4
nBytes = numPts*4
ordDataType = 4
else:
# complex data
dset['ord_data_type'] = 6
nBytes = numPts*8
ordDataType = 6
isEven = bool(dset['abscissa_spacing']) # handling even/uneven abscissa spacing manually
# handling abscissa spacing automatically
# isEven = len( set( [ dset['x'][ii]-dset['x'][ii-1] for ii in range(1,len(dset['x'])) ] ) ) == 1
dset['abscissa_min'] = dset['x'][0]
dx = dset['x'][1] - dset['x'][0]
fh.write('%6i\n%6i' % (-1,58))
if dset['binary']:
if sys.byteorder == 'little': bo = 1
else: bo = 2
fh.write('b%6i%6i%12i%12i%6i%6i%12i%12i\n' % (bo,2,11,nBytes,0,0,0,0))
else:
fh.write('%74s\n' % ' ')
fh.write('%-80s\n' % dset['id1'])
fh.write('%-80s\n' % dset['id2'])
fh.write('%-80s\n' % dset['id3'])
fh.write('%-80s\n' % dset['id4'])
fh.write('%-80s\n' % dset['id5'])
fh.write('%5i%10i%5i%10i %10s%10i%4i %10s%10i%4i\n' %
(dset['func_type'],dset['func_id'],dset['ver_num'],dset['load_case_id'],
dset['rsp_ent_name'],dset['rsp_node'],dset['rsp_dir'],dset['ref_ent_name'],
dset['ref_node'],dset['ref_dir']))
fh.write('%10i%10i%10i%13.4e%13.4e%13.4e\n' % (ordDataType, numPts, isEven,
isEven*dset['abscissa_min'], isEven*dx, dset['z_axis_value']))
fh.write('%10i%5i%5i%5i %-20s %-20s\n' % (dset['abscissa_spec_data_type'],
dset['abscissa_len_unit_exp'],dset['abscissa_force_unit_exp'],
dset['abscissa_temp_unit_exp'],dset['abscissa_axis_lab'],
dset['abscissa_axis_units_lab']))
fh.write('%10i%5i%5i%5i %-20s %-20s\n' % (dset['ordinate_spec_data_type'],
dset['ordinate_len_unit_exp'],dset['ordinate_force_unit_exp'],
dset['ordinate_temp_unit_exp'],dset['ordinate_axis_lab'],
dset['ordinate_axis_units_lab']))
fh.write('%10i%5i%5i%5i %-20s %-20s\n' % (dset['orddenom_spec_data_type'],
dset['orddenom_len_unit_exp'],dset['orddenom_force_unit_exp'],
dset['orddenom_temp_unit_exp'],dset['orddenom_axis_lab'],
dset['orddenom_axis_units_lab']))
fh.write('%10i%5i%5i%5i %-20s %-20s\n' % (dset['z_axis_spec_data_type'],
dset['z_axis_len_unit_exp'],dset['z_axis_force_unit_exp'],
dset['z_axis_temp_unit_exp'],dset['z_axis_axis_lab'],
dset['z_axis_axis_units_lab']))
if isR:
if isEven:
data = dset['data'].copy()
else:
data = np.zeros(2 * numPts, 'd')
data[0:-1:2] = dset['x']
data[1::2] = dset['data']
else:
if isEven:
data = np.zeros(2 * numPts, 'd')
data[0:-1:2] = dset['data'].real
data[1::2] = dset['data'].imag
else:
data = np.zeros(3 * numPts, 'd')
data[0:-2:3] = dset['x']
data[1:-1:3] = dset['data'].real
data[2::3] = dset['data'].imag
# always write data in double precision
if dset['binary']:
fh.close()
if mode.lower() == 'overwrite':
fh = open(self._fileName, 'wb')
elif mode.lower() == 'add':
fh = open(self._fileName, 'ab')
#write data
if bo == 1:
[fh.write(struct.pack('<d', datai)) for datai in data]
else:
[fh.write(struct.pack('>d', datai)) for datai in data]
fh.close()
if mode.lower() == 'overwrite':
fh = open(self._fileName, 'wt')
elif mode.lower() == 'add':
fh = open(self._fileName, 'at')
else:
n4Blocks = len(data)//4
remVals = len(data)%4
if isR:
if isEven:
fh.write( n4Blocks*'%20.11e%20.11e%20.11e%20.11e\n'%tuple(data[:4*n4Blocks]) )
if remVals > 0:
fh.write( (remVals*'%20.11e'+'\n') % tuple(data[4*n4Blocks:]) )
else:
fh.write( n4Blocks*'%13.4e%20.11e%13.4e%20.11e\n'%tuple(data[:4*n4Blocks]) )
if remVals > 0:
fmt = ['%13.4e','%20.11e','%13.4e','%20.11e']
fh.write( (''.join(fmt[remVals])+'\n') % tuple(data[4*n4Blocks:]) )
else:
if isEven:
fh.write( n4Blocks*'%20.11e%20.11e%20.11e%20.11e\n'%tuple(data[:4*n4Blocks]) )
if remVals > 0:
fh.write( (remVals*'%20.11e'+'\n') % tuple(data[4*n4Blocks:]) )
else:
n3Blocks = len(data)/3
remVals = len(data)%3
# TODO: It breaks here for long measurements. Implement exceptions.
# n3Blocks seems to be a natural number but of the wrong type. Convert for now,
# but make assertion to prevent werid things from happening.
if float(n3Blocks - int(n3Blocks)) != 0.0:
print('Warning: Something went wrong when savning the uff file.')
n3Blocks = int(n3Blocks)
fh.write( n3Blocks*'%13.4e%20.11e%20.11e\n'%tuple(data[:3*n3Blocks]) )
if remVals > 0:
fmt = ['%13.4e','%20.11e','%20.11e']
fh.write( (''.join(fmt[remVals])+'\n') % tuple(data[3*n3Blocks:]) )
fh.write('%6i\n' % -1)
del data
except KeyError as msg:
raise UFFException('The required key \''+msg.args[0]+'\' not present when writing data-set #58')
except:
raise UFFException('Error writing data-set #58')
def _write2411(self,fh,dset):
try:
dict = {'export_cs_number':0,\
'cs_color':8}
dset = self._opt_fields(dset,dict)
fh.write('%6i\n%6i%74s\n' % (-1,2411,' '))
for node in range(dset['grid_global'].shape[0]):
fh.write('%10i%10i%10i%10i\n' %(dset['grid_global'][node,0], dset['export_cs_number'],
dset['grid_global'][node,0], dset['cs_color']))
fh.write('%25.16e%25.16e%25.16e\n' %tuple(dset['grid_global'][node,1:]))
fh.write('%6i\n' % -1)
except:
raise UFFException('Error writing data-set #2411')