Coverage for src / sdynpy / fileio / sdynpy_rattlesnake.py: 3%
616 statements
« prev ^ index » next coverage.py v7.13.4, created at 2026-03-11 16:22 +0000
1# -*- coding: utf-8 -*-
2"""
3Load in time data from Rattlesnake runs
4"""
5"""
6Copyright 2022 National Technology & Engineering Solutions of Sandia,
7LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
8Government retains certain rights in this software.
10This program is free software: you can redistribute it and/or modify
11it under the terms of the GNU General Public License as published by
12the Free Software Foundation, either version 3 of the License, or
13(at your option) any later version.
15This program is distributed in the hope that it will be useful,
16but WITHOUT ANY WARRANTY; without even the implied warranty of
17MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18GNU General Public License for more details.
20You should have received a copy of the GNU General Public License
21along with this program. If not, see <https://www.gnu.org/licenses/>.
22"""
24import netCDF4 as nc4
25import numpy as np
26from ..core.sdynpy_coordinate import (coordinate_array, outer_product,
27 CoordinateArray, _string_map)
28from ..core.sdynpy_data import data_array, FunctionTypes
29from ..core.sdynpy_system import System
30import pandas as pd
31import sys
32import openpyxl as opxl
33import os
34import warnings
def read_rattlesnake_output(file, coordinate_override_column=None, read_only_indices=None,
                            read_variable='time_data', abscissa_start=None,
                            abscissa_stop=None, downsample=None):
    """
    Reads in a Rattlesnake data file and returns the time history array as well
    as the channel table

    Parameters
    ----------
    file : str or nc4.Dataset
        Path to the file to read in or an already open dataset
    coordinate_override_column : str, optional
        Specify a channel table column name to extract coordinate information from.
        If not specified, then it will be assembled from node id and directions.
    read_only_indices : slice or iterable, optional
        A valid indexing operation to select which channel indices to read
    read_variable : str, optional
        The time variable from the Rattlesnake file to read.  These will
        generally be time_data, time_data_1, time_data_2, etc. depending on
        how many streams exist in the file.  The default is 'time_data'.
    abscissa_start : float, optional
        Data will not be extracted for abscissa values less than this value
    abscissa_stop : float, optional
        Data will not be extracted for abscissa values greater than this value
    downsample : int, optional
        A step size to use to downsample the dataset when reading

    Returns
    -------
    data_array : TimeHistoryArray
        Time history data in the Rattlesnake output file
    channel_table : DataFrame
        Pandas Dataframe containing the channel table information

    Raises
    ------
    TypeError
        If `file` is not a path string or an open nc4.Dataset
    """
    if isinstance(file, str):
        ds = nc4.Dataset(file, 'r')
    elif isinstance(file, nc4.Dataset):
        ds = file
    else:
        # Previously an unhandled type fell through and produced a confusing
        # NameError on `ds` later in the function
        raise TypeError('file must be a str path or an open nc4.Dataset, not {}'.format(
            type(file).__name__))
    if read_only_indices is None:
        read_only_indices = slice(None)
    num_samples = ds[read_variable].shape[-1]
    if abscissa_start is None:
        start_index = 0
    else:
        start_index = int(np.ceil(abscissa_start * ds.sample_rate))
    if abscissa_stop is None:
        stop_index = num_samples
    else:
        # Clamp to the available data: the netCDF slice below clamps
        # automatically, but np.arange does not, so without this the abscissa
        # could end up longer than the extracted ordinate
        stop_index = min(int(np.ceil(abscissa_stop * ds.sample_rate)), num_samples)
    step = 1 if downsample is None else downsample
    abscissa_slice = slice(start_index, stop_index, step)
    output_data = np.array(ds[read_variable][:, abscissa_slice][read_only_indices])
    # Build the time abscissa from the same index range used to slice the data
    abscissa = np.arange(start_index, stop_index, step) / ds.sample_rate
    if coordinate_override_column is None:
        # Node "numbers" may contain non-numeric characters; keep digits only
        nodes = [int(''.join(char for char in node if char in '0123456789'))
                 for node in ds['channels']['node_number'][...][read_only_indices]]
        directions = np.array(ds['channels']['node_direction'][...][read_only_indices], dtype='<U3')
        coordinates = coordinate_array(nodes, directions)[:, np.newaxis]
    else:
        coordinates = coordinate_array(string_array=ds['channels'][coordinate_override_column][read_only_indices])[
            :, np.newaxis]
    # NOTE(review): the channel table contains ALL channels in the file, not
    # just those selected by read_only_indices -- this matches the original
    # behavior; confirm whether filtering is desired
    array = {name: np.array(variable[:]) for name, variable in ds['channels'].variables.items()}
    channel_table = pd.DataFrame(array)
    # Comment fields are packed as 'a :: b' pairs from the channel table
    comment1 = np.char.add(np.char.add(np.array(ds['channels']['channel_type'][...][read_only_indices], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['unit'][...][read_only_indices], dtype='<U80'))
    comment2 = np.char.add(np.char.add(np.array(ds['channels']['physical_device'][...][read_only_indices], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['physical_channel'][...][read_only_indices], dtype='<U80'))
    comment3 = np.char.add(np.char.add(np.array(ds['channels']['feedback_device'][...][read_only_indices], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['feedback_channel'][...][read_only_indices], dtype='<U80'))
    comment4 = np.array(ds['channels']['comment'][...][read_only_indices], dtype='<U80')
    # Comment 5 accumulates make, model, serial number, and triax dof
    comment5 = np.array(ds['channels']['make'][...][read_only_indices], dtype='<U80')
    for key in ('model', 'serial_number', 'triax_dof'):
        comment5 = np.char.add(comment5, np.array(' '))
        comment5 = np.char.add(comment5, np.array(ds['channels'][key][...][read_only_indices], dtype='<U80'))
    time_data = data_array(FunctionTypes.TIME_RESPONSE,
                           abscissa,
                           output_data,
                           coordinates,
                           comment1,
                           comment2,
                           comment3,
                           comment4,
                           comment5)
    # Only close the dataset if this function opened it
    if isinstance(file, str):
        ds.close()
    return time_data, channel_table
def read_system_id_data(file):
    """
    Reads system identification results from a Rattlesnake .npz output file.

    Parameters
    ----------
    file : str or mapping
        Path to a NumPy .npz file, or an already-loaded mapping of arrays
        indexable by the field names used below (e.g. the object returned by
        ``np.load``).

    Returns
    -------
    frfs : FRF data array
    response_cpsd : response CPSD data array
    reference_cpsd : reference CPSD data array
    response_noise_cpsd : response noise CPSD data array
    reference_noise_cpsd : reference noise CPSD data array
    coherence : multiple coherence data array
    """
    if isinstance(file, str):
        file = np.load(file)
    # Frequency line spacing of the system identification spectral data
    df = file['sysid_frequency_spacing']
    # NOTE(review): np.isnan here assumes the transformation matrix is stored
    # as a scalar NaN when no transformation was used -- confirm against the
    # Rattlesnake writer; an actual matrix would make this truth test ambiguous
    if np.isnan(file['response_transformation_matrix']):
        try:
            response_dofs = coordinate_array(
                [int(v) for v in file['channel_node_number'][file['response_indices']]],
                file['channel_node_direction'][file['response_indices']])
        except Exception:
            # Fall back to 1-based sequential dofs if node numbers are unparsable
            response_dofs = coordinate_array(file['response_indices']+1, 0)
    else:
        # Transformed responses have no physical channel; use sequential dofs
        response_dofs = coordinate_array(np.arange(file['response_transformation_matrix'].shape[0])+1, 0)
    if np.isnan(file['reference_transformation_matrix']):
        try:
            reference_dofs = coordinate_array(
                [int(v) for v in file['channel_node_number'][file['reference_indices']]],
                file['channel_node_direction'][file['reference_indices']])
        except Exception:
            # Fall back to 1-based sequential dofs if node numbers are unparsable
            reference_dofs = coordinate_array(file['reference_indices']+1, 0)
    else:
        # Transformed references have no physical channel; use sequential dofs
        reference_dofs = coordinate_array(np.arange(file['reference_transformation_matrix'].shape[0])+1, 0)
    # Stored arrays are (frequency, ...); move the frequency axis last as
    # expected by data_array, and rebuild abscissa as df * line index
    ordinate = np.moveaxis(file['frf_data'], 0, -1)
    frfs = data_array(FunctionTypes.FREQUENCY_RESPONSE_FUNCTION,
                      df*np.arange(ordinate.shape[-1]), ordinate,
                      outer_product(response_dofs, reference_dofs))
    ordinate = np.moveaxis(file['response_cpsd'], 0, -1)
    response_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                               df*np.arange(ordinate.shape[-1]), ordinate,
                               outer_product(response_dofs, response_dofs))
    ordinate = np.moveaxis(file['reference_cpsd'], 0, -1)
    reference_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                                df*np.arange(ordinate.shape[-1]), ordinate,
                                outer_product(reference_dofs, reference_dofs))
    ordinate = np.moveaxis(file['response_noise_cpsd'], 0, -1)
    response_noise_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                                     df*np.arange(ordinate.shape[-1]), ordinate,
                                     outer_product(response_dofs, response_dofs))
    ordinate = np.moveaxis(file['reference_noise_cpsd'], 0, -1)
    reference_noise_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                                      df*np.arange(ordinate.shape[-1]), ordinate,
                                      outer_product(reference_dofs, reference_dofs))
    ordinate = np.moveaxis(file['coherence'], 0, -1)
    coherence = data_array(FunctionTypes.MULTIPLE_COHERENCE,
                           df*np.arange(ordinate.shape[-1]), ordinate,
                           outer_product(response_dofs))
    return frfs, response_cpsd, reference_cpsd, response_noise_cpsd, reference_noise_cpsd, coherence
def read_system_id_nc4(file, coordinate_override_column=None):
    """
    Reads system identification spectral data from a Rattlesnake NetCDF file.

    Parameters
    ----------
    file : str or nc4.Dataset
        Path to the file to read in or an already open dataset
    coordinate_override_column : str, optional
        Specify a channel table column name to extract coordinate information
        from.  If not specified, it will be assembled from node ids and
        directions.

    Returns
    -------
    frfs : FRF data array
    response_cpsd : response CPSD data array
    drive_cpsd : drive (reference) CPSD data array
    response_noise_cpsd : response noise CPSD data array
    drive_noise_cpsd : drive noise CPSD data array
    coherence : multiple coherence data array
    """
    if isinstance(file,str):
        ds = nc4.Dataset(file,'r')
    elif isinstance(file,nc4.Dataset):
        ds = file

    # The file is assumed to contain exactly one environment group besides
    # the 'channels' group; only the first such group is read
    environment = [group for group in ds.groups if not group == 'channels'][0]

    # Get the channels in the group
    if coordinate_override_column is None:
        # Node "numbers" may contain non-numeric characters; keep digits only
        nodes = [int(''.join(char for char in node if char in '0123456789'))
                 for node in ds['channels']['node_number']]
        directions = np.array(ds['channels']['node_direction'][:], dtype='<U3')
        coordinates = coordinate_array(nodes, directions)
    else:
        coordinates = coordinate_array(string_array=ds['channels'][coordinate_override_column])
    # Channels with a feedback device assigned are treated as drives
    drives = ds['channels']['feedback_device'][:] != ''

    # Cull down to just those in the environment
    environment_index = np.where(ds['environment_names'][:] == environment)[0][0]
    environment_channels = ds['environment_active_channels'][:, environment_index].astype(bool)

    drives = drives[environment_channels]
    coordinates = coordinates[environment_channels]

    control_indices = ds[environment]['control_channel_indices'][:]

    # If a response transformation was used, physical control channels are
    # replaced by virtual "Transformed Response" dofs numbered from 1, and
    # control_indices becomes a simple 0..n-1 range into those virtual dofs
    if 'response_transformation_matrix' in ds[environment].variables:
        control_coordinates = coordinate_array(np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1,0)
        response_transform_comment1 = np.array([f'Unknown :: Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment2 = np.array([f'Transformed Response {i} :: Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment3 = np.array([f'Transformed Response {i} :: Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment4 = np.array([f'Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment5 = np.array([f'Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        control_indices = np.arange(ds[environment]['response_transformation_matrix'].shape[0])
    else:
        control_coordinates = coordinates[control_indices]

    # Likewise, a reference transformation replaces physical drive channels
    # with virtual "Transformed Drive" dofs, and all of them count as drives
    if 'reference_transformation_matrix' in ds[environment].variables:
        drive_coordinates = coordinate_array(np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1,0)
        drive_transform_comment1 = np.array([f'Unknown :: Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment2 = np.array([f'Transformed Drive {i} :: Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment3 = np.array([f'Transformed Drive {i} :: Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment4 = np.array([f'Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment5 = np.array([f'Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drives = np.ones(ds[environment]['reference_transformation_matrix'].shape[0],dtype=bool)
    else:
        drive_coordinates = coordinates[drives]

    # Load the spectral data
    frequency_spacing = ds.sample_rate/ds[environment].sysid_frame_size
    fft_lines = ds[environment].dimensions['sysid_fft_lines'].size
    frequencies = np.arange(fft_lines)*frequency_spacing

    # Complex spectral data is stored as separate real and imaginary
    # variables; recombine and move the frequency axis (axis 0) last
    frf_array = np.moveaxis(
        np.array(ds[environment]['frf_data_real'][:]
                 + 1j*ds[environment]['frf_data_imag'][:]),
        0, -1)

    response_cpsd_array = np.moveaxis(
        np.array(ds[environment]['response_cpsd_real'][:]
                 + 1j*ds[environment]['response_cpsd_imag'][:]),
        0, -1)

    drive_cpsd_array = np.moveaxis(
        np.array(ds[environment]['reference_cpsd_real'][:]
                 + 1j*ds[environment]['reference_cpsd_imag'][:]),
        0, -1)

    response_noise_cpsd_array = np.moveaxis(
        np.array(ds[environment]['response_noise_cpsd_real'][:]
                 + 1j*ds[environment]['response_noise_cpsd_imag'][:]),
        0, -1)

    drive_noise_cpsd_array = np.moveaxis(
        np.array(ds[environment]['reference_noise_cpsd_real'][:]
                 + 1j*ds[environment]['reference_noise_cpsd_imag'][:]),
        0, -1)

    coherence_array = np.moveaxis(np.array(ds[environment]['frf_coherence'][:]),
                                  0,-1)

    # Coordinate matrices for the square CPSD matrices and rectangular FRFs
    response_coordinates_cpsd = outer_product(control_coordinates, control_coordinates)
    drive_coordinates_cpsd = outer_product(drive_coordinates, drive_coordinates)
    frf_coordinates = outer_product(control_coordinates,drive_coordinates)
    coherence_coordinates = control_coordinates[:,np.newaxis]

    # Comment fields are packed as 'a :: b' pairs from the channel table
    comment1 = np.char.add(np.char.add(np.array(ds['channels']['channel_type'][:], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['unit'][:], dtype='<U80'))
    comment2 = np.char.add(np.char.add(np.array(ds['channels']['physical_device'][:], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['physical_channel'][:], dtype='<U80'))
    comment3 = np.char.add(np.char.add(np.array(ds['channels']['feedback_device'][:], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['feedback_channel'][:], dtype='<U80'))
    comment4 = np.array(ds['channels']['comment'][:], dtype='<U80')
    # Comment 5 accumulates make, model, serial number, and triax dof
    comment5 = np.array(ds['channels']['make'][:], dtype='<U80')
    for key in ('model', 'serial_number', 'triax_dof'):
        comment5 = np.char.add(comment5, np.array(' '))
        comment5 = np.char.add(comment5, np.array(ds['channels'][key][:], dtype='<U80'))

    # Per-channel comments restricted to this environment's channels
    full_comment1 = comment1[environment_channels]
    full_comment2 = comment2[environment_channels]
    full_comment3 = comment3[environment_channels]
    full_comment4 = comment4[environment_channels]
    full_comment5 = comment5[environment_channels]

    # Select the comment source for responses: virtual transform labels if a
    # response transformation was used, otherwise the physical channel data
    if 'response_transformation_matrix' in ds[environment].variables:
        comment1 = response_transform_comment1
        comment2 = response_transform_comment2
        comment3 = response_transform_comment3
        comment4 = response_transform_comment4
        comment5 = response_transform_comment5
    else:
        comment1 = full_comment1
        comment2 = full_comment2
        comment3 = full_comment3
        comment4 = full_comment4
        comment5 = full_comment5
    comment1_response_cpsd = np.empty((response_coordinates_cpsd.shape[0], response_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment2_response_cpsd = np.empty((response_coordinates_cpsd.shape[0], response_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment3_response_cpsd = np.empty((response_coordinates_cpsd.shape[0], response_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment4_response_cpsd = np.empty((response_coordinates_cpsd.shape[0], response_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment5_response_cpsd = np.empty((response_coordinates_cpsd.shape[0], response_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment1_coherence = np.empty(response_coordinates_cpsd.shape[0], dtype=comment1.dtype)
    comment2_coherence = np.empty(response_coordinates_cpsd.shape[0], dtype=comment1.dtype)
    comment3_coherence = np.empty(response_coordinates_cpsd.shape[0], dtype=comment1.dtype)
    comment4_coherence = np.empty(response_coordinates_cpsd.shape[0], dtype=comment1.dtype)
    comment5_coherence = np.empty(response_coordinates_cpsd.shape[0], dtype=comment1.dtype)
    # Off-diagonal CPSD entries get 'row // column' combined comments
    for i, idx in enumerate(control_indices):
        comment1_coherence[i] = comment1[idx]
        comment2_coherence[i] = comment2[idx]
        comment3_coherence[i] = comment3[idx]
        comment4_coherence[i] = comment4[idx]
        comment5_coherence[i] = comment5[idx]
        for j, jdx in enumerate(control_indices):
            comment1_response_cpsd[i, j] = comment1[idx] + ' // ' + comment1[jdx]
            comment2_response_cpsd[i, j] = comment2[idx] + ' // ' + comment2[jdx]
            comment3_response_cpsd[i, j] = comment3[idx] + ' // ' + comment3[jdx]
            comment4_response_cpsd[i, j] = comment4[idx] + ' // ' + comment4[jdx]
            comment5_response_cpsd[i, j] = comment5[idx] + ' // ' + comment5[jdx]

    # Select the comment source for drives, analogous to responses above
    if 'reference_transformation_matrix' in ds[environment].variables:
        comment1 = drive_transform_comment1
        comment2 = drive_transform_comment2
        comment3 = drive_transform_comment3
        comment4 = drive_transform_comment4
        comment5 = drive_transform_comment5
    else:
        comment1 = full_comment1
        comment2 = full_comment2
        comment3 = full_comment3
        comment4 = full_comment4
        comment5 = full_comment5
    comment1_drive_cpsd = np.empty((drive_coordinates_cpsd.shape[0], drive_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment2_drive_cpsd = np.empty((drive_coordinates_cpsd.shape[0], drive_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment3_drive_cpsd = np.empty((drive_coordinates_cpsd.shape[0], drive_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment4_drive_cpsd = np.empty((drive_coordinates_cpsd.shape[0], drive_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment5_drive_cpsd = np.empty((drive_coordinates_cpsd.shape[0], drive_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    drive_indices = np.where(drives)[0]
    for i, idx in enumerate(drive_indices):
        for j, jdx in enumerate(drive_indices):
            comment1_drive_cpsd[i, j] = comment1[idx] + ' // ' + comment1[jdx]
            comment2_drive_cpsd[i, j] = comment2[idx] + ' // ' + comment2[jdx]
            comment3_drive_cpsd[i, j] = comment3[idx] + ' // ' + comment3[jdx]
            comment4_drive_cpsd[i, j] = comment4[idx] + ' // ' + comment4[jdx]
            comment5_drive_cpsd[i, j] = comment5[idx] + ' // ' + comment5[jdx]

    # FRF comments combine a response (row) comment with a drive (column)
    # comment; pick each side's source independently
    if 'response_transformation_matrix' in ds[environment].variables:
        rcomment1 = response_transform_comment1
        rcomment2 = response_transform_comment2
        rcomment3 = response_transform_comment3
        rcomment4 = response_transform_comment4
        rcomment5 = response_transform_comment5
    else:
        rcomment1 = full_comment1
        rcomment2 = full_comment2
        rcomment3 = full_comment3
        rcomment4 = full_comment4
        rcomment5 = full_comment5
    if 'reference_transformation_matrix' in ds[environment].variables:
        dcomment1 = drive_transform_comment1
        dcomment2 = drive_transform_comment2
        dcomment3 = drive_transform_comment3
        dcomment4 = drive_transform_comment4
        dcomment5 = drive_transform_comment5
    else:
        dcomment1 = full_comment1
        dcomment2 = full_comment2
        dcomment3 = full_comment3
        dcomment4 = full_comment4
        dcomment5 = full_comment5

    comment1_frf = np.empty((frf_coordinates.shape[0], frf_coordinates.shape[1]), dtype=comment1.dtype)
    comment2_frf = np.empty((frf_coordinates.shape[0], frf_coordinates.shape[1]), dtype=comment1.dtype)
    comment3_frf = np.empty((frf_coordinates.shape[0], frf_coordinates.shape[1]), dtype=comment1.dtype)
    comment4_frf = np.empty((frf_coordinates.shape[0], frf_coordinates.shape[1]), dtype=comment1.dtype)
    comment5_frf = np.empty((frf_coordinates.shape[0], frf_coordinates.shape[1]), dtype=comment1.dtype)
    for i, idx in enumerate(control_indices):
        for j, jdx in enumerate(drive_indices):
            comment1_frf[i, j] = rcomment1[idx] + ' // ' + dcomment1[jdx]
            comment2_frf[i, j] = rcomment2[idx] + ' // ' + dcomment2[jdx]
            comment3_frf[i, j] = rcomment3[idx] + ' // ' + dcomment3[jdx]
            comment4_frf[i, j] = rcomment4[idx] + ' // ' + dcomment4[jdx]
            comment5_frf[i, j] = rcomment5[idx] + ' // ' + dcomment5[jdx]

    # Save the data to SDynpy objects
    response_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                               frequencies, response_cpsd_array, response_coordinates_cpsd,
                               comment1_response_cpsd, comment2_response_cpsd, comment3_response_cpsd,
                               comment4_response_cpsd, comment5_response_cpsd)
    response_noise_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                                     frequencies, response_noise_cpsd_array, response_coordinates_cpsd,
                                     comment1_response_cpsd, comment2_response_cpsd, comment3_response_cpsd,
                                     comment4_response_cpsd, comment5_response_cpsd)
    drive_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                            frequencies, drive_cpsd_array, drive_coordinates_cpsd,
                            comment1_drive_cpsd, comment2_drive_cpsd, comment3_drive_cpsd,
                            comment4_drive_cpsd, comment5_drive_cpsd)
    drive_noise_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                                  frequencies, drive_noise_cpsd_array, drive_coordinates_cpsd,
                                  comment1_drive_cpsd, comment2_drive_cpsd, comment3_drive_cpsd,
                                  comment4_drive_cpsd, comment5_drive_cpsd)
    frfs = data_array(FunctionTypes.FREQUENCY_RESPONSE_FUNCTION,
                      frequencies, frf_array, frf_coordinates,
                      comment1_frf,comment2_frf,comment3_frf,comment4_frf,comment5_frf)
    coherence = data_array(FunctionTypes.MULTIPLE_COHERENCE,
                           frequencies,coherence_array,coherence_coordinates,
                           comment1_coherence,comment2_coherence,comment3_coherence,
                           comment4_coherence,comment5_coherence)

    return frfs, response_cpsd, drive_cpsd, response_noise_cpsd, drive_noise_cpsd, coherence
def read_random_spectral_data(file, coordinate_override_column=None):
    """
    Reads random vibration spectral data from a Rattlesnake NetCDF file.

    Parameters
    ----------
    file : str or nc4.Dataset
        Path to the file to read in or an already open dataset
    coordinate_override_column : str, optional
        Specify a channel table column name to extract coordinate information
        from.  If not specified, it will be assembled from node ids and
        directions.

    Returns
    -------
    response_cpsd : response CPSD data array
    spec_cpsd : specification CPSD data array
    drive_cpsd : drive CPSD data array
    """
    if isinstance(file, str):
        ds = nc4.Dataset(file, 'r')
    elif isinstance(file, nc4.Dataset):
        ds = file

    # The file is assumed to contain exactly one environment group besides
    # the 'channels' group; only the first such group is read
    environment = [group for group in ds.groups if not group == 'channels'][0]

    # Get the channels in the group
    if coordinate_override_column is None:
        # Node "numbers" may contain non-numeric characters; keep digits only
        nodes = [int(''.join(char for char in node if char in '0123456789'))
                 for node in ds['channels']['node_number']]
        directions = np.array(ds['channels']['node_direction'][:], dtype='<U3')
        coordinates = coordinate_array(nodes, directions)
    else:
        coordinates = coordinate_array(string_array=ds['channels'][coordinate_override_column])
    # Channels with a feedback device assigned are treated as drives
    drives = ds['channels']['feedback_device'][:] != ''

    # Cull down to just those in the environment
    environment_index = np.where(ds['environment_names'][:] == environment)[0][0]
    environment_channels = ds['environment_active_channels'][:, environment_index].astype(bool)

    drives = drives[environment_channels]
    coordinates = coordinates[environment_channels]

    control_indices = ds[environment]['control_channel_indices'][:]

    # If a response transformation was used, physical control channels are
    # replaced by virtual "Transformed Response" dofs numbered from 1, and
    # control_indices becomes a simple 0..n-1 range into those virtual dofs
    if 'response_transformation_matrix' in ds[environment].variables:
        control_coordinates = coordinate_array(np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1,0)
        response_transform_comment1 = np.array([f'Unknown :: Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment2 = np.array([f'Transformed Response {i} :: Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment3 = np.array([f'Transformed Response {i} :: Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment4 = np.array([f'Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment5 = np.array([f'Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        control_indices = np.arange(ds[environment]['response_transformation_matrix'].shape[0])
    else:
        control_coordinates = coordinates[control_indices]

    # Likewise, a reference transformation replaces physical drive channels
    # with virtual "Transformed Drive" dofs, and all of them count as drives
    if 'reference_transformation_matrix' in ds[environment].variables:
        drive_coordinates = coordinate_array(np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1,0)
        drive_transform_comment1 = np.array([f'Unknown :: Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment2 = np.array([f'Transformed Drive {i} :: Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment3 = np.array([f'Transformed Drive {i} :: Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment4 = np.array([f'Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment5 = np.array([f'Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drives = np.ones(ds[environment]['reference_transformation_matrix'].shape[0],dtype=bool)
    else:
        drive_coordinates = coordinates[drives]

    # Load the spectral data; the specification is stored on its own
    # frequency line grid rather than being derived from a frame size
    frequencies = np.array(ds[environment]['specification_frequency_lines'][:])

    # Complex spectral data is stored as separate real and imaginary
    # variables; recombine and move the frequency axis (axis 0) last
    spec_cpsd = np.moveaxis(
        np.array(ds[environment]['specification_cpsd_matrix_real'][:]
                 + 1j*ds[environment]['specification_cpsd_matrix_imag'][:]),
        0, -1)

    response_cpsd = np.moveaxis(
        np.array(ds[environment]['response_cpsd_real'][:]
                 + 1j*ds[environment]['response_cpsd_imag'][:]),
        0, -1)

    drive_cpsd = np.moveaxis(
        np.array(ds[environment]['drive_cpsd_real'][:]
                 + 1j*ds[environment]['drive_cpsd_imag'][:]),
        0, -1)

    # Coordinate matrices for the square CPSD matrices
    response_coordinates_cpsd = outer_product(control_coordinates, control_coordinates)
    drive_coordinates_cpsd = outer_product(drive_coordinates, drive_coordinates)

    # Comment fields are packed as 'a :: b' pairs from the channel table
    comment1 = np.char.add(np.char.add(np.array(ds['channels']['channel_type'][:], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['unit'][:], dtype='<U80'))
    comment2 = np.char.add(np.char.add(np.array(ds['channels']['physical_device'][:], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['physical_channel'][:], dtype='<U80'))
    comment3 = np.char.add(np.char.add(np.array(ds['channels']['feedback_device'][:], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['feedback_channel'][:], dtype='<U80'))
    comment4 = np.array(ds['channels']['comment'][:], dtype='<U80')
    # Comment 5 accumulates make, model, serial number, and triax dof
    comment5 = np.array(ds['channels']['make'][:], dtype='<U80')
    for key in ('model', 'serial_number', 'triax_dof'):
        comment5 = np.char.add(comment5, np.array(' '))
        comment5 = np.char.add(comment5, np.array(ds['channels'][key][:], dtype='<U80'))

    # Per-channel comments restricted to this environment's channels
    full_comment1 = comment1[environment_channels]
    full_comment2 = comment2[environment_channels]
    full_comment3 = comment3[environment_channels]
    full_comment4 = comment4[environment_channels]
    full_comment5 = comment5[environment_channels]

    # Select the comment source for responses: virtual transform labels if a
    # response transformation was used, otherwise the physical channel data
    if 'response_transformation_matrix' in ds[environment].variables:
        comment1 = response_transform_comment1
        comment2 = response_transform_comment2
        comment3 = response_transform_comment3
        comment4 = response_transform_comment4
        comment5 = response_transform_comment5
    else:
        comment1 = full_comment1
        comment2 = full_comment2
        comment3 = full_comment3
        comment4 = full_comment4
        comment5 = full_comment5
    comment1_response_cpsd = np.empty((response_coordinates_cpsd.shape[0], response_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment2_response_cpsd = np.empty((response_coordinates_cpsd.shape[0], response_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment3_response_cpsd = np.empty((response_coordinates_cpsd.shape[0], response_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment4_response_cpsd = np.empty((response_coordinates_cpsd.shape[0], response_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment5_response_cpsd = np.empty((response_coordinates_cpsd.shape[0], response_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    # CPSD entries get 'row // column' combined comments
    for i, idx in enumerate(control_indices):
        for j, jdx in enumerate(control_indices):
            comment1_response_cpsd[i, j] = comment1[idx] + ' // ' + comment1[jdx]
            comment2_response_cpsd[i, j] = comment2[idx] + ' // ' + comment2[jdx]
            comment3_response_cpsd[i, j] = comment3[idx] + ' // ' + comment3[jdx]
            comment4_response_cpsd[i, j] = comment4[idx] + ' // ' + comment4[jdx]
            comment5_response_cpsd[i, j] = comment5[idx] + ' // ' + comment5[jdx]

    # Select the comment source for drives, analogous to responses above
    if 'reference_transformation_matrix' in ds[environment].variables:
        comment1 = drive_transform_comment1
        comment2 = drive_transform_comment2
        comment3 = drive_transform_comment3
        comment4 = drive_transform_comment4
        comment5 = drive_transform_comment5
    else:
        comment1 = full_comment1
        comment2 = full_comment2
        comment3 = full_comment3
        comment4 = full_comment4
        comment5 = full_comment5
    comment1_drive_cpsd = np.empty((drive_coordinates_cpsd.shape[0], drive_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment2_drive_cpsd = np.empty((drive_coordinates_cpsd.shape[0], drive_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment3_drive_cpsd = np.empty((drive_coordinates_cpsd.shape[0], drive_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment4_drive_cpsd = np.empty((drive_coordinates_cpsd.shape[0], drive_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    comment5_drive_cpsd = np.empty((drive_coordinates_cpsd.shape[0], drive_coordinates_cpsd.shape[1]), dtype=comment1.dtype)
    drive_indices = np.where(drives)[0]
    for i, idx in enumerate(drive_indices):
        for j, jdx in enumerate(drive_indices):
            comment1_drive_cpsd[i, j] = comment1[idx] + ' // ' + comment1[jdx]
            comment2_drive_cpsd[i, j] = comment2[idx] + ' // ' + comment2[jdx]
            comment3_drive_cpsd[i, j] = comment3[idx] + ' // ' + comment3[jdx]
            comment4_drive_cpsd[i, j] = comment4[idx] + ' // ' + comment4[jdx]
            comment5_drive_cpsd[i, j] = comment5[idx] + ' // ' + comment5[jdx]

    # Save the data to SDynpy objects.  Note the specification CPSD reuses
    # the response coordinates and comments, since the specification is
    # defined at the control degrees of freedom
    response_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                               frequencies, response_cpsd, response_coordinates_cpsd,
                               comment1_response_cpsd, comment2_response_cpsd, comment3_response_cpsd,
                               comment4_response_cpsd, comment5_response_cpsd)
    spec_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                           frequencies, spec_cpsd, response_coordinates_cpsd,
                           comment1_response_cpsd, comment2_response_cpsd, comment3_response_cpsd,
                           comment4_response_cpsd, comment5_response_cpsd)
    drive_cpsd = data_array(FunctionTypes.POWER_SPECTRAL_DENSITY,
                            frequencies, drive_cpsd, drive_coordinates_cpsd,
                            comment1_drive_cpsd, comment2_drive_cpsd, comment3_drive_cpsd,
                            comment4_drive_cpsd, comment5_drive_cpsd)
    return response_cpsd, spec_cpsd, drive_cpsd
def read_modal_data(file, coordinate_override_column=None, read_only_indices=None):
    """
    Reads time data, FRFs, and coherence from a Rattlesnake modal run.

    Parameters
    ----------
    file : str or nc4.Dataset
        Path to the file to read in, or an already open netCDF4 dataset.
    coordinate_override_column : str, optional
        Channel table column name from which to extract coordinate
        information instead of the default node_number/node_direction
        columns.
    read_only_indices : array-like, optional
        Indices of the channels to read.  If not specified, all channels
        are read.

    Returns
    -------
    time_data : TimeHistoryArray
        Measured time frames, one row of functions per average.
    frf_data : TransferFunctionArray
        Frequency response functions between reference and response channels.
    coherence_data : MultipleCoherenceArray
        Multiple coherence for each response channel.
    channel_table : pandas.DataFrame
        The complete channel table stored in the file.

    Raises
    ------
    TypeError
        If `file` is neither a string nor an open netCDF4 Dataset.
    """
    if isinstance(file, str):
        ds = nc4.Dataset(file, 'r')
    elif isinstance(file, nc4.Dataset):
        ds = file
    else:
        # Previously an unsupported type fell through and raised a confusing
        # NameError on the first use of `ds`; fail fast instead.
        raise TypeError('file must be a path string or an open netCDF4.Dataset, not {}'.format(type(file)))
    if read_only_indices is None:
        read_only_indices = slice(None)

    def channel_comments(indices):
        # Build the five comment fields used by SDynPy data arrays from the
        # channel table, for the channels selected by `indices`.
        def field(key):
            return np.array(ds['channels'][key][...][indices], dtype='<U80')
        separator = np.array(' :: ')
        comment1 = np.char.add(np.char.add(field('channel_type'), separator), field('unit'))
        comment2 = np.char.add(np.char.add(field('physical_device'), separator), field('physical_channel'))
        comment3 = np.char.add(np.char.add(field('feedback_device'), separator), field('feedback_channel'))
        comment4 = field('comment')
        # comment5 is 'make model serial_number triax_dof' joined by spaces
        comment5 = field('make')
        for key in ('model', 'serial_number', 'triax_dof'):
            comment5 = np.char.add(comment5, np.array(' '))
            comment5 = np.char.add(comment5, field(key))
        return comment1, comment2, comment3, comment4, comment5

    # Get parameters
    num_channels = ds.groups['channels'].variables['physical_device'].size
    # The only group other than the channel table is the environment group
    group_key = [g for g in ds.groups if not g == 'channels'][0]
    group = ds.groups[group_key]
    sample_rate = ds.sample_rate
    samples_per_frame = group.samples_per_frame
    num_averages = group.num_averages
    # Load in the time data: stored flat per channel, reshaped to
    # (averages, channels, samples_per_frame)
    try:
        output_data = np.array(ds['time_data'][...][read_only_indices]).reshape(
            num_channels, num_averages, samples_per_frame).transpose(1, 0, 2)
    except ValueError:
        warnings.warn('Number of averages in the time data does not match the number of averages specified in the test settings. Your test may be incomplete.')
        # Fall back to however many complete frames actually exist
        output_data = np.array(ds['time_data'][...][read_only_indices]).reshape(
            num_channels, -1, samples_per_frame).transpose(1, 0, 2)
    abscissa = np.arange(samples_per_frame) / sample_rate
    if coordinate_override_column is None:
        # Keep only the digit characters of the node number field
        nodes = [int(''.join(char for char in node if char in '0123456789'))
                 for node in ds['channels']['node_number'][...][read_only_indices]]
        directions = np.array(ds['channels']['node_direction'][...][read_only_indices], dtype='<U3')
        coordinates = coordinate_array(nodes, directions)[:, np.newaxis]
    else:
        coordinates = coordinate_array(string_array=ds['channels'][coordinate_override_column][read_only_indices])[
            :, np.newaxis]
    array = {name: np.array(variable[:]) for name, variable in ds['channels'].variables.items()}
    channel_table = pd.DataFrame(array)
    comment1, comment2, comment3, comment4, comment5 = channel_comments(read_only_indices)
    time_data = data_array(FunctionTypes.TIME_RESPONSE,
                           abscissa,
                           output_data,
                           coordinates,
                           comment1,
                           comment2,
                           comment3,
                           comment4,
                           comment5)
    # Response and Reference Indices
    kept_indices = np.arange(num_channels)[read_only_indices]
    reference_indices = np.array(group.variables['reference_channel_indices'][:])
    response_indices = np.array(group.variables['response_channel_indices'][:])
    # Positions within the response/reference lists of channels that survived
    # the read_only_indices down-selection
    keep_response_indices = np.array([i for i, index in enumerate(response_indices) if index in kept_indices])
    keep_reference_indices = np.array([i for i, index in enumerate(reference_indices) if index in kept_indices])
    frequency_lines = np.arange(group.dimensions['fft_lines'].size)*sample_rate/samples_per_frame
    coherence_data = np.array(group['coherence'][:, keep_response_indices]).T
    # NOTE(review): `coordinates` was already sliced by read_only_indices, yet
    # it is indexed below with indices into the full channel list; this is
    # only consistent when read_only_indices selects all channels -- confirm.
    response_comment1, response_comment2, response_comment3, response_comment4, response_comment5 = \
        channel_comments(response_indices[keep_response_indices])
    coherence_data = data_array(FunctionTypes.MULTIPLE_COHERENCE,
                                frequency_lines,
                                coherence_data,
                                coordinates[response_indices[keep_response_indices]],
                                response_comment1,
                                response_comment2,
                                response_comment3,
                                response_comment4,
                                response_comment5)
    # Frequency Response Functions: move the frequency axis to the end so the
    # array is (response, reference, frequency)
    frf_data = np.moveaxis(np.array(group['frf_data_real'])[:, keep_response_indices[:, np.newaxis], keep_reference_indices]
                           + np.array(group['frf_data_imag'])[:, keep_response_indices[:, np.newaxis], keep_reference_indices]*1j, 0, -1)
    frf_coordinate = outer_product(coordinates[response_indices[keep_response_indices], 0],
                                   coordinates[reference_indices[keep_reference_indices], 0])
    reference_comment1, reference_comment2, reference_comment3, reference_comment4, reference_comment5 = \
        channel_comments(reference_indices[keep_reference_indices])

    def pair_comments(response_comment, reference_comment):
        # Combine response and reference comments as 'response / reference'
        response_comment, reference_comment = np.broadcast_arrays(
            response_comment[:, np.newaxis], reference_comment)
        return np.char.add(np.char.add(response_comment, np.array(' / ')), reference_comment)

    comment1 = pair_comments(response_comment1, reference_comment1)
    comment2 = pair_comments(response_comment2, reference_comment2)
    comment3 = pair_comments(response_comment3, reference_comment3)
    comment4 = pair_comments(response_comment4, reference_comment4)
    comment5 = pair_comments(response_comment5, reference_comment5)
    frf_data = data_array(FunctionTypes.FREQUENCY_RESPONSE_FUNCTION,
                          frequency_lines,
                          frf_data,
                          frf_coordinate,
                          comment1,
                          comment2,
                          comment3,
                          comment4,
                          comment5)
    return time_data, frf_data, coherence_data, channel_table
def read_transient_control_data(file, coordinate_override_column=None):
    """
    Reads control data from a Rattlesnake transient run.

    Parameters
    ----------
    file : str or nc4.Dataset
        Path to the file to read in, or an already open netCDF4 dataset.
    coordinate_override_column : str, optional
        Channel table column name from which to extract coordinate
        information instead of the default node_number/node_direction
        columns.

    Returns
    -------
    response_signal : TimeHistoryArray
        The measured control responses.
    spec_signal : TimeHistoryArray
        The specification (target) signal for the control channels.
    drive_signal : TimeHistoryArray
        The drive signals sent to the exciters.

    Raises
    ------
    TypeError
        If `file` is neither a string nor an open netCDF4 Dataset.
    """
    if isinstance(file, str):
        ds = nc4.Dataset(file, 'r')
    elif isinstance(file, nc4.Dataset):
        ds = file
    else:
        # Previously an unsupported type fell through and raised a confusing
        # NameError on the first use of `ds`; fail fast instead.
        raise TypeError('file must be a path string or an open netCDF4.Dataset, not {}'.format(type(file)))
    # Bug fix: `coordinate_override_column` was previously reset to None
    # unconditionally right here, silently disabling the argument.  It is now
    # honored, consistent with the other readers in this module.

    # The environment group is the only group other than the channel table
    environment = [group for group in ds.groups if not group == 'channels'][0]

    # Get the channels in the group
    if coordinate_override_column is None:
        # Keep only the digit characters of the node number field
        nodes = [int(''.join(char for char in node if char in '0123456789'))
                 for node in ds['channels']['node_number']]
        directions = np.array(ds['channels']['node_direction'][:], dtype='<U3')
        coordinates = coordinate_array(nodes, directions)
    else:
        coordinates = coordinate_array(string_array=ds['channels'][coordinate_override_column])
    # Channels with a feedback device defined are drive channels
    drives = ds['channels']['feedback_device'][:] != ''

    # Cull down to just those in the environment
    environment_index = np.where(ds['environment_names'][:] == environment)[0][0]
    environment_channels = ds['environment_active_channels'][:, environment_index].astype(bool)

    drives = drives[environment_channels]
    coordinates = coordinates[environment_channels]

    control_indices = ds[environment]['control_channel_indices'][:]

    if 'response_transformation_matrix' in ds[environment].variables:
        # Responses are virtual (transformed) channels: synthesize coordinates
        # and placeholder comments for them
        control_coordinates = coordinate_array(np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1,0)
        response_transform_comment1 = np.array([f'Unknown :: Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment2 = np.array([f'Transformed Response {i} :: Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment3 = np.array([f'Transformed Response {i} :: Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment4 = np.array([f'Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        response_transform_comment5 = np.array([f'Transformed Response {i}' for i in np.arange(ds[environment]['response_transformation_matrix'].shape[0])+1],dtype='<U80')
        control_indices = np.arange(ds[environment]['response_transformation_matrix'].shape[0])
    else:
        control_coordinates = coordinates[control_indices]

    if 'reference_transformation_matrix' in ds[environment].variables:
        # Drives are virtual (transformed) channels: synthesize coordinates
        # and placeholder comments for them
        drive_coordinates = coordinate_array(np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1,0)
        drive_transform_comment1 = np.array([f'Unknown :: Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment2 = np.array([f'Transformed Drive {i} :: Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment3 = np.array([f'Transformed Drive {i} :: Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment4 = np.array([f'Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drive_transform_comment5 = np.array([f'Transformed Drive {i}' for i in np.arange(ds[environment]['reference_transformation_matrix'].shape[0])+1],dtype='<U80')
        drives = np.ones(ds[environment]['reference_transformation_matrix'].shape[0],dtype=bool)
    else:
        drive_coordinates = coordinates[drives]

    # Load the time data
    timesteps = np.arange(ds[environment].dimensions['signal_samples'].size)/ds.sample_rate

    spec_signal = np.array(ds[environment]['control_signal'][...])

    response_signal = np.array(ds[environment]['control_response'][...])

    drive_signal = np.array(ds[environment]['control_drives'][...])

    response_coordinates = control_coordinates[:, np.newaxis]
    drive_coordinates = drive_coordinates[:, np.newaxis]

    # Build comments for the full channel table, then select per signal below
    comment1 = np.char.add(np.char.add(np.array(ds['channels']['channel_type'][:], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['unit'][:], dtype='<U80'))
    comment2 = np.char.add(np.char.add(np.array(ds['channels']['physical_device'][:], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['physical_channel'][:], dtype='<U80'))
    comment3 = np.char.add(np.char.add(np.array(ds['channels']['feedback_device'][:], dtype='<U80'),
                                       np.array(' :: ')),
                           np.array(ds['channels']['feedback_channel'][:], dtype='<U80'))
    comment4 = np.array(ds['channels']['comment'][:], dtype='<U80')
    # comment5 is 'make model serial_number triax_dof' joined by spaces
    comment5 = np.array(ds['channels']['make'][:], dtype='<U80')
    for key in ('model', 'serial_number', 'triax_dof'):
        comment5 = np.char.add(comment5, np.array(' '))
        comment5 = np.char.add(comment5, np.array(ds['channels'][key][:], dtype='<U80'))

    full_comment1 = comment1[environment_channels]
    full_comment2 = comment2[environment_channels]
    full_comment3 = comment3[environment_channels]
    full_comment4 = comment4[environment_channels]
    full_comment5 = comment5[environment_channels]

    if 'response_transformation_matrix' in ds[environment].variables:
        comment1 = response_transform_comment1
        comment2 = response_transform_comment2
        comment3 = response_transform_comment3
        comment4 = response_transform_comment4
        comment5 = response_transform_comment5
    else:
        comment1 = full_comment1[control_indices]
        comment2 = full_comment2[control_indices]
        comment3 = full_comment3[control_indices]
        comment4 = full_comment4[control_indices]
        comment5 = full_comment5[control_indices]

    # Save the data to SDynpy objects
    response_signal = data_array(FunctionTypes.TIME_RESPONSE,
                                 timesteps, response_signal, response_coordinates,
                                 comment1, comment2, comment3,
                                 comment4, comment5)
    spec_signal = data_array(FunctionTypes.TIME_RESPONSE,
                             timesteps, spec_signal, response_coordinates,
                             comment1, comment2, comment3,
                             comment4, comment5)

    if 'reference_transformation_matrix' in ds[environment].variables:
        comment1 = drive_transform_comment1
        comment2 = drive_transform_comment2
        comment3 = drive_transform_comment3
        comment4 = drive_transform_comment4
        comment5 = drive_transform_comment5
    else:
        comment1 = full_comment1[drives]
        comment2 = full_comment2[drives]
        comment3 = full_comment3[drives]
        comment4 = full_comment4[drives]
        comment5 = full_comment5[drives]

    drive_signal = data_array(FunctionTypes.TIME_RESPONSE,
                              timesteps, drive_signal, drive_coordinates,
                              comment1, comment2, comment3,
                              comment4, comment5)

    return response_signal, spec_signal, drive_signal
def create_synthetic_test(spreadsheet_file_name: str,
                          system_filename: str, system: System,
                          excitation_coordinates: CoordinateArray,
                          response_coordinates: CoordinateArray,
                          rattlesnake_directory: str,
                          displacement_derivative=2,
                          sample_rate: int = None,
                          time_per_read: float = None,
                          time_per_write: float = None,
                          integration_oversample: int = 10,
                          environments: list = None,
                          channel_comment_data: list = None,
                          channel_serial_number_data: list = None,
                          channel_triax_dof_data: list = None,
                          channel_engineering_unit_data: list = None,
                          channel_warning_level_data: list = None,
                          channel_abort_level_data: list = None,
                          channel_active_in_environment_data: dict = None
                          ):
    """
    Creates a Rattlesnake test profile spreadsheet for a synthetic test.

    Saves the given system to `system_filename`, uses Rattlesnake (imported
    from `rattlesnake_directory`) to write a profile template spreadsheet,
    then fills in the channel table and hardware sheets in that spreadsheet.

    Parameters
    ----------
    spreadsheet_file_name : str
        Path to the profile spreadsheet that will be created.
    system_filename : str
        Path to which `system` is saved for Rattlesnake to load.
    system : System
        SDynPy system used as the virtual device under test.
    excitation_coordinates : CoordinateArray
        Degrees of freedom at which excitation forces are applied.
    response_coordinates : CoordinateArray
        Degrees of freedom at which responses (accelerations) are measured.
    rattlesnake_directory : str
        Directory containing the Rattlesnake source (its `components`
        package is imported from here).
    displacement_derivative : int, optional
        NOTE(review): accepted but never written to the spreadsheet --
        Hardware row 6 is hard-coded to 1 below; confirm intent.
    sample_rate : int, optional
        Sample rate written to the hardware sheet, if given.
    time_per_read : float, optional
        Time per read written to the hardware sheet, if given.
    time_per_write : float, optional
        Time per write written to the hardware sheet, if given.
    integration_oversample : int, optional
        Integration oversampling factor written to the hardware sheet.
    environments : list, optional
        List of (environment_type, environment_name) tuples, where
        environment_type names a Rattlesnake ControlTypes member.
    channel_comment_data, channel_serial_number_data, channel_triax_dof_data,
    channel_engineering_unit_data, channel_warning_level_data,
    channel_abort_level_data : list, optional
        Optional per-channel values written to the corresponding channel
        table columns.
    channel_active_in_environment_data : dict, optional
        Maps environment name to a per-channel boolean sequence marking
        which channels are active in that environment.  If not given, all
        channels are marked active in all environments.
    """
    # Avoid a shared mutable default argument
    if environments is None:
        environments = []
    system.save(system_filename)
    # Load in Rattlesnake to create a template for the test
    sys.path.insert(0, rattlesnake_directory)
    try:
        import components as rs
        environment_data = []
        for environment_type, environment_name in environments:
            # Map the string name to the Rattlesnake control type identifier
            environment_type = rs.environments.ControlTypes[environment_type.upper()]
            environment_data.append((environment_type, environment_name))
        rs.ui_utilities.save_combined_environments_profile_template(spreadsheet_file_name, environment_data)
    finally:
        # Always restore sys.path, even if the Rattlesnake import fails
        sys.path.pop(0)
    # Populate the channel table
    workbook = opxl.load_workbook(spreadsheet_file_name)
    # Subscript access replaces get_sheet_by_name, which was deprecated and
    # removed in openpyxl 3.x
    worksheet = workbook['Channel Table']
    index = 3  # channel table data starts on row 3
    for i, channel in enumerate(response_coordinates):
        worksheet.cell(index, 1, i+1)
        worksheet.cell(index, 2, channel.node)
        worksheet.cell(index, 3, _string_map[channel.direction])
        worksheet.cell(index, 12, 'Virtual')
        worksheet.cell(index, 14, 'Accel')
        index += 1
    for i, channel in enumerate(excitation_coordinates):
        worksheet.cell(index, 1, len(response_coordinates)+i+1)
        worksheet.cell(index, 2, channel.node)
        worksheet.cell(index, 3, _string_map[channel.direction])
        worksheet.cell(index, 12, 'Virtual')
        worksheet.cell(index, 14, 'Force')
        worksheet.cell(index, 20, 'Shaker')
        index += 1
    # Go through the various channel table data that could have been optionally
    # provided
    for column, data in [(4, channel_comment_data),
                         (5, channel_serial_number_data),
                         (6, channel_triax_dof_data),
                         (8, channel_engineering_unit_data),
                         (22, channel_warning_level_data),
                         (23, channel_abort_level_data)]:
        if data is None:
            continue
        for row_index, value in enumerate(data):
            worksheet.cell(3+row_index, column, value)
    # Now fill out the environment table
    if channel_active_in_environment_data is not None:
        for environment_index, (environment_type, environment_name) in enumerate(environment_data):
            for row_index, value in enumerate(channel_active_in_environment_data[environment_name]):
                if value:
                    worksheet.cell(3+row_index, 24+environment_index, 'X')
    else:
        # By default, mark every channel active in every environment
        for environment_index, (environment_type, environment_name) in enumerate(environment_data):
            for row_index in range(response_coordinates.size + excitation_coordinates.size):
                worksheet.cell(3+row_index, 24+environment_index, 'X')
    worksheet = workbook['Hardware']
    # Hardware index 6 -- presumably the synthetic/virtual device; confirm
    worksheet.cell(1, 2, 6)
    worksheet.cell(2, 2, os.path.abspath(system_filename))
    if sample_rate is not None:
        worksheet.cell(3, 2, sample_rate)
    if time_per_read is not None:
        worksheet.cell(4, 2, time_per_read)
    if time_per_write is not None:
        worksheet.cell(5, 2, time_per_write)
    worksheet.cell(6, 2, 1)
    worksheet.cell(7, 2, integration_oversample)
    workbook.save(spreadsheet_file_name)
def read_sine_control_data(control_file,
                           read_quantities = 'control_response_signals_combined',
                           excitation_dofs = None, control_dofs = None):
    """
    Reads quantities from a Rattlesnake sine control data file.

    Parameters
    ----------
    control_file : str or mapping
        Path to a NumPy .npz file saved by Rattlesnake, or an
        already-loaded mapping of arrays (e.g. an NpzFile).
    read_quantities : str or iterable of str, optional
        Name(s) of the quantities to read.  Must be drawn from the
        concatenated or unconcatenated key lists defined below.
    excitation_dofs : CoordinateArray, optional
        Coordinates assigned to excitation channels.  If not given,
        sequential node numbers with direction 0 are generated.
    control_dofs : CoordinateArray, optional
        Coordinates assigned to control response channels.  If not given,
        sequential node numbers with direction 0 are generated.

    Returns
    -------
    TimeHistoryArray or list of TimeHistoryArray
        One data array per requested quantity.  If `read_quantities` was
        given as a single string, the single data array is returned
        directly rather than in a list.

    Raises
    ------
    ValueError
        If a requested quantity is not one of the readable keys.
    """
    concatenated_keys = ['control_response_signals_combined',
                         'control_response_amplitudes',
                         'control_response_phases',
                         'control_drive_modifications']
    unconcatenated_keys = ['control_response_frequencies',
                           'control_response_arguments',
                           'control_target_phases',
                           'control_target_amplitudes']
    # The last dimension label determines the abscissa; the second-to-last
    # determines the degrees of freedom.
    # NOTE(review): the two 'achieved_excitation_*' entries are labeled here
    # but appear in neither key list above, so requesting them raises
    # ValueError below -- confirm whether they were meant to be readable.
    dimension_labels = {}
    dimension_labels['control_response_signals_combined'] = ('response','timestep')
    dimension_labels['control_response_amplitudes'] = ('tone','response','timestep')
    dimension_labels['control_response_phases'] = ('tone','response','timestep')
    dimension_labels['control_drive_modifications'] = ('tone','excitation','block_num')
    dimension_labels['achieved_excitation_signals_combined'] = ('excitation','timestep')
    dimension_labels['achieved_excitation_signals'] = ('tone','excitation','timestep')
    dimension_labels['control_response_frequencies'] = ('tone','timestep')
    dimension_labels['control_response_arguments'] = ('tone','timestep')
    dimension_labels['control_target_amplitudes'] = ('tone','response','timestep')
    dimension_labels['control_target_phases'] = ('tone','response','timestep')
    if isinstance(control_file,str):
        control_file = np.load(control_file)
    sample_rate = control_file['sample_rate']
    if isinstance(read_quantities,str):
        # A single string returns a single data array rather than a list
        read_quantities = [read_quantities]
        return_single = True
    else:
        return_single = False
    return_data = []
    for read_quantity in read_quantities:
        try:
            dimension_label = dimension_labels[read_quantity]
        except KeyError:
            # Suppress the KeyError context; the ValueError message is the
            # complete explanation
            raise ValueError(f'{read_quantity} is not a valid quantity to read. read_quantity must be one of {concatenated_keys+unconcatenated_keys}.') from None
        # Extract the data and concatenate if necessary
        if read_quantity in concatenated_keys:
            # Concatenated quantities are stored as numbered chunks
            # (e.g. 'control_response_amplitudes_0', '..._1'); gather them and
            # join along the final (time/block) axis.
            data = []
            for key in control_file:
                if read_quantity == '_'.join(key.split("_")[:-1]):
                    this_data = control_file[key]
                    # Pad with trailing singleton axes until the chunk matches
                    # the expected dimensionality
                    while this_data.ndim < len(dimension_label):
                        this_data = this_data[...,np.newaxis]
                    data.append(this_data)
            data = np.concatenate(data,axis=-1)
        elif read_quantity in unconcatenated_keys:
            data = control_file[read_quantity]
        else:
            # Reachable for keys that have a dimension label but are in
            # neither accept-list (see NOTE above)
            raise ValueError(f'{read_quantity} is not a valid quantity to read. read_quantity must be one of {concatenated_keys+unconcatenated_keys}.')
        # Set up the abscissa
        if dimension_label[-1] == 'timestep':
            abscissa = np.arange(data.shape[-1])/sample_rate
        elif dimension_label[-1] == 'block_num':
            abscissa = np.arange(data.shape[-1])
        else:
            raise ValueError(f"{dimension_label[-1]} is an invalid entry. How did you get here?")
        # Set up degrees of freedom
        if dimension_label[-2] == 'response':
            if control_dofs is None:
                dofs = coordinate_array(np.arange(data.shape[-2])+1,0)
            else:
                dofs = control_dofs
        elif dimension_label[-2] == 'excitation':
            if excitation_dofs is None:
                dofs = coordinate_array(np.arange(data.shape[-2])+1,0)
            else:
                dofs = excitation_dofs
        elif dimension_label[-2] == 'tone':
            dofs = coordinate_array(np.arange(data.shape[-2])+1,0)
        else:
            raise ValueError(f"{dimension_label[-2]} is an invalid entry. How did you get here?")
        # Quantities with a tone axis get the tone names broadcast as comments
        if any(dimension == 'tone' for dimension in dimension_label):
            comment1 = control_file['names'].reshape(*[-1 if dimension == 'tone' else 1 for dimension in dimension_label][:-1])
        else:
            comment1 = ''
        # Construct the TimeHistoryArray
        return_data.append(data_array(FunctionTypes.TIME_RESPONSE,
                                      abscissa,
                                      data,
                                      dofs,
                                      comment1
                                      ))
    if return_single:
        return_data = return_data[0]
    return return_data