forked from anick107/nastran_pch_reader
-
Notifications
You must be signed in to change notification settings - Fork 0
/
nastran_pch_reader.py
390 lines (302 loc) · 14.5 KB
/
nastran_pch_reader.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
import cmath
import matplotlib.pyplot as plt
import csv
# NASTRAN result request types this module can parse (see PchParser.validate)
CONST_VALID_REQUESTS = ['ACCELERATION', 'DISPLACEMENTS', 'MPCF', 'SPCF', 'ELEMENT FORCES', 'ELEMENT STRAINS']
def dispatch_parse(output, data_chunks):
    """Convert one frame of raw punch floats into result values.

    For complex outputs ('MAGNITUDE-PHASE', 'REAL-IMAGINARY') the chunk list
    holds the two parts back-to-back (first half, then second half) and a
    list of complex numbers is returned; phase angles are in degrees.  For
    any other output a plain (new) list of the chunks is returned.

    Raises ValueError when a complex output has an odd number of chunks.
    """
    if output == 'MAGNITUDE-PHASE' or output == 'REAL-IMAGINARY':
        # the two halves must pair up exactly
        num, remainder = divmod(len(data_chunks), 2)
        if remainder != 0:
            raise ValueError('Wrong number of chunks! Output: %s, num of chunks: %d'
                             % (output, len(data_chunks)))
    else:
        num = len(data_chunks)
    if output == 'MAGNITUDE-PHASE':
        # magnitude * e^(i*phase), phase given in degrees
        return [data_chunks[i] * cmath.exp(1j * data_chunks[i + num] * cmath.pi / 180.0)
                for i in range(num)]
    elif output == 'REAL-IMAGINARY':
        return [data_chunks[i] + 1j * data_chunks[i + num] for i in range(num)]
    return list(data_chunks[:num])
class PchParser:
def reset_current_frame(self):
self.cur_data_chunks = []
self.is_frequency_response = False
self.output_sort = 0
self.cur_subcase = 0
self.cur_output = 0
self.current_frequency = 0
self.cur_entity_id = 0
self.cur_entity_type_id = 0
def __init__(self, filename):
# define the dictionary
self.parsed_data = {'FREQUENCY': {}, 'SUBCASES': set()}
for request in CONST_VALID_REQUESTS:
self.parsed_data[request] = {}
# initiate current frame
self.reset_current_frame()
is_header = True
# start reading
with open(filename, 'r') as pch:
# read only first 72 characters from the punch file
for line in pch:
line = line[0:72]
# reset all variables
if line.startswith('$TITLE ='):
is_header = False
# insert the last frame remaining in memory
self.insert_current_frame()
# reset the frame
self.reset_current_frame()
# skip everything before TITLE
if is_header:
continue
# parse the subcase
if line.startswith('$SUBCASE ID =') or \
line.startswith('$RANDOM ID ='):
self.cur_subcase = int(line[13:].strip())
self.parsed_data['SUBCASES'].add(self.cur_subcase)
# identify NASTRAN request
if line.startswith('$DISPLACEMENTS'):
self.cur_request = 'DISPLACEMENTS'
elif line.startswith('$ACCELERATION'):
self.cur_request = 'ACCELERATION'
elif line.startswith('$MPCF'):
self.cur_request = 'MPCF'
elif line.startswith('$SPCF'):
self.cur_request = 'SPCF'
elif line.startswith('$ELEMENT FORCES'):
self.cur_request = 'ELEMENT FORCES'
elif line.startswith('$ELEMENT STRAINS'):
self.cur_request = 'ELEMENT STRAINS'
# identify output type
if line.startswith('$REAL-IMAGINARY OUTPUT'):
self.cur_output = 'REAL-IMAGINARY'
elif line.startswith('$MAGNITUDE-PHASE OUTPUT'):
self.cur_output = 'MAGNITUDE-PHASE'
elif line.startswith('REAL OUTPUT'):
self.cur_output = 'REAL'
# parse of frequency response results
if line.find('IDENTIFIED BY FREQUENCY') != -1:
self.is_frequency_response = True
self.output_sort = 2
elif line.find('$FREQUENCY =') != -1:
self.is_frequency_response = True
self.output_sort = 1
# parse entity id
if line.startswith('$POINT ID ='):
self.cur_entity_id = int(line[11:23].strip())
elif line.startswith('$ELEMENT ID ='):
self.cur_entity_id = int(line[13:23].strip())
elif line.startswith('$FREQUENCY = '):
self.current_frequency = float(line[12:28].strip())
# parse element type
if line.startswith('$ELEMENT TYPE ='):
self.cur_entity_type_id = int(line[15:27].strip())
# ignore other comments
if line.startswith('$'):
continue
# check if everything ok
self.validate()
# start data parsing
line = line.replace('G', ' ')
if line.startswith('-CONT-'):
line = line.replace('-CONT-', '')
self.cur_data_chunks += [float(_) for _ in line.split()]
else:
# insert the last frame
self.insert_current_frame()
# update the last frame with a new data
self.cur_data_chunks = [float(_) for _ in line.split()]
# last block remaining in memory
self.insert_current_frame()
def validate(self):
if self.cur_request not in CONST_VALID_REQUESTS:
raise NotImplementedError("Request %s is not implemented", self.cur_request)
if self.cur_request == 'ELEMENT FORCES' and self.cur_entity_type_id not in [12, 102]:
raise NotImplementedError("Element forces parser is implemented only for CELAS2 and CBUSH elements!")
def insert_current_frame(self):
# last block remaining in memory
if len(self.cur_data_chunks) > 0:
# ensure that subcase is allocated in the dataset
if self.cur_subcase not in self.parsed_data[self.cur_request]:
self.parsed_data[self.cur_request][self.cur_subcase] = {}
self.parsed_data['FREQUENCY'][self.cur_subcase] = {}
values = dispatch_parse(self.cur_output, self.cur_data_chunks[1:])
if self.is_frequency_response:
# incremented by frequency, entity is given
if self.output_sort == 2:
self.current_frequency = self.cur_data_chunks[0]
# incremented by entity, frequency is given
elif self.output_sort == 1:
self.cur_entity_id = int(self.cur_data_chunks[0])
# insert frequency in the database
if self.current_frequency not in self.parsed_data['FREQUENCY'][self.cur_subcase]:
self.parsed_data['FREQUENCY'][self.cur_subcase][self.current_frequency] = \
len(self.parsed_data['FREQUENCY'][self.cur_subcase])
# ensure that dictionary for the entity exists
if self.cur_entity_id not in self.parsed_data[self.cur_request][self.cur_subcase]:
self.parsed_data[self.cur_request][self.cur_subcase][self.cur_entity_id] = []
self.parsed_data[self.cur_request][self.cur_subcase][self.cur_entity_id].append(values)
else:
self.cur_entity_id = int(self.cur_data_chunks[0])
self.parsed_data[self.cur_request][self.cur_subcase][self.cur_entity_id] = values
def health_check(self):
frequency_steps = []
for subcase in self.parsed_data['SUBCASES']:
frequency_steps.append(len(self.parsed_data['FREQUENCY'][subcase]))
assert min(frequency_steps) == max(frequency_steps)
def get_subcases(self):
return sorted(self.parsed_data['SUBCASES'])
def __get_data_per_request(self, request, subcase):
self.health_check()
if subcase in self.parsed_data[request]:
return self.parsed_data[request][subcase]
else:
raise KeyError('%s data for subase %s is not found' % (request, subcase))
def get_accelerations(self, subcase, entityID=None, component=None):
if (entityID != component) and (entityID == None or component == None):
raise KeyError("Need both Entity ID and direction component")
elif entityID == None:
return self.__get_data_per_request('ACCELERATION', subcase)
elif component == 'tx':
tx_data = []
for each in self.get_accelerations(subcase)[entityID]:
tx_data.append(each[0])
return tx_data
elif component == 'ty':
ty_data = []
for each in self.get_accelerations(subcase)[entityID]:
ty_data.append(each[1])
return ty_data
elif component == 'tz':
tz_data = []
for each in self.get_accelerations(subcase)[entityID]:
tz_data.append(each[2])
return tz_data
elif component == 'rx':
rx_data = []
for each in self.get_accelerations(subcase)[entityID]:
rx_data.append(each[3])
return rx_data
elif component == 'ry':
ry_data = []
for each in self.get_accelerations(subcase)[entityID]:
ry_data.append(each[4])
return ry_data
elif component == 'rz':
rz_data = []
for each in self.get_accelerations(subcase)[entityID]:
rz_data.append(each[5])
return rz_data
def get_displacements(self, subcase):
return self.__get_data_per_request('DISPLACEMENTS', subcase)
def get_mpcf(self, subcase):
return self.__get_data_per_request('MPCF', subcase)
def get_spcf(self, subcase):
return self.__get_data_per_request('SPCF', subcase)
def get_forces(self, subcase):
return self.__get_data_per_request('ELEMENT FORCES', subcase)
def get_frequencies(self, subcase):
return sorted(self.parsed_data['FREQUENCY'][subcase])
class SimplePch:
    """Parser for a simple punch file with no `$SUBCASE` headers and a single
    data type (e.g. acceleration, displacement, etc.).

    The file may have multiple entities (i.e. nodes or elements), but only a
    single load case.  This type of punch file results from omitting the
    PUNCH callout in the case control load request meanwhile adding an
    OUTPUT(XYPLOT) statement requesting punch results."""

    def __init__(self, filename):
        """Parse *filename* into per-entity domain/range lists."""
        self.entitylist = []  # entity ids in file order
        self.data = {}        # entity id -> {'domain': [...], 'range': [...]}
        entityID = None       # entity whose data lines are currently being read
        with open(filename, 'r') as pch:
            for line in pch:
                # punch files carry data only in the first 72 columns
                line = line[0:72]
                if line.startswith('$SUBCASE'):
                    continue
                elif line.startswith('$'):
                    # header line: the third whitespace token is the entity id
                    entityID = int(line.split()[2])
                    self.data[entityID] = {'domain': [], 'range': []}
                    self.entitylist.append(entityID)
                elif line.startswith(' '):
                    # data line belonging to the current entity
                    if entityID is None:
                        # BUGFIX: previously a NameError; raise a clear error
                        raise ValueError('Data line found before any entity header')
                    datapoint = [float(i) for i in line.split()]
                    # first number is a row counter; keep (domain, range) pair
                    self.data[entityID]['domain'].append(datapoint[1])
                    self.data[entityID]['range'].append(datapoint[2])

    def get_entity_list(self):
        """Get a list of each entity (node, element, etc.) in the simple
        punch file."""
        return self.entitylist

    def get_domain(self, entityID):
        """Get a list of all values in the domain of the simple punch file."""
        return self.data[entityID]['domain']

    def get_range(self, entityID):
        """Get a list of all values in the range of the simple punch file."""
        return self.data[entityID]['range']

    def get_plot(self, entityID, fig_num=1, xscale='linear', yscale='linear'):
        """Plot data for a single entity using matplotlib.pyplot.

        Returns:
            matplotlib.pyplot figure object
        """
        plt.figure(fig_num)
        plot_domain = self.get_domain(entityID)
        plot_range = self.get_range(entityID)
        plt.plot(plot_domain, plot_range, label=entityID)
        plt.legend(loc='upper left')
        plt.figure(fig_num).add_subplot(1, 1, 1).set_xscale(xscale)
        plt.figure(fig_num).add_subplot(1, 1, 1).set_yscale(yscale)
        return plt.figure(fig_num)

    def get_plot_all(self, fig_num, xscale='linear', yscale='linear'):
        """Plot data for all entities on a single plot using
        matplotlib.pyplot.

        Returns:
            matplotlib.pyplot figure object
        """
        for each in self.get_entity_list():
            self.get_plot(each, fig_num)
        plt.legend(loc='upper left')
        plt.figure(fig_num).add_subplot(1, 1, 1).set_xscale(xscale)
        plt.figure(fig_num).add_subplot(1, 1, 1).set_yscale(yscale)
        # BUGFIX: docstring promised a plot object, but nothing was returned
        return plt.figure(fig_num)

    def export(self, targetfile, stacked=True):
        """Export data to a CSV file.

        With stacked=True one row per domain value is written, with a column
        per entity (all entities are assumed to share the same domain).
        Otherwise one (entity, domain, range) row is written per data point.
        """
        if stacked:
            with open(targetfile, 'w', newline='') as csvfile:
                fields = ['domain'] + list(self.data.keys())
                writer = csv.DictWriter(csvfile,
                                        fieldnames=fields,
                                        dialect='excel',
                                        delimiter=',')
                writer.writeheader()
                # assumes every entity shares the first entity's domain
                firstdomain = self.get_domain(self.get_entity_list()[0])
                for row_idx, domain_val in enumerate(firstdomain):
                    row = {'domain': domain_val}
                    for entity in self.data:
                        row[entity] = self.data[entity]['range'][row_idx]
                    writer.writerow(row)
        else:
            with open(targetfile, 'w', newline='') as csvfile:
                fields = ['entity', 'domain', 'range']
                writer = csv.DictWriter(csvfile,
                                        fieldnames=fields,
                                        dialect='excel',
                                        delimiter=',')
                writer.writeheader()
                for entity in self.get_entity_list():
                    d = self.get_domain(entity)
                    r = self.get_range(entity)
                    for i, j in zip(d, r):
                        writer.writerow({'entity': entity,
                                         'domain': i,
                                         'range': j})