Coverage for src / sdynpy / fileio / sdynpy_uff.py: 15%
155 statements
« prev ^ index » next coverage.py v7.13.4, created at 2026-03-11 16:22 +0000
1# -*- coding: utf-8 -*-
2"""
3Interface to the universal file format (UFF).
5Using the functions in this module, one can read and write unv files.
6"""
7"""
8Copyright 2022 National Technology & Engineering Solutions of Sandia,
9LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
10Government retains certain rights in this software.
12This program is free software: you can redistribute it and/or modify
13it under the terms of the GNU General Public License as published by
14the Free Software Foundation, either version 3 of the License, or
15(at your option) any later version.
17This program is distributed in the hope that it will be useful,
18but WITHOUT ANY WARRANTY; without even the implied warranty of
19MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20GNU General Public License for more details.
22You should have received a copy of the GNU General Public License
23along with this program. If not, see <https://www.gnu.org/licenses/>.
24"""
26import numpy as np
class UFFReadError(Exception):
    """Exception to be used when there is an error reading a UNV file"""

    def __init__(self, value):
        # Call the base-class constructor so the exception behaves like a
        # standard Exception (populates e.args, supports pickling/repr).
        # The original implementation skipped this, leaving e.args empty.
        super().__init__(value)
        # Detail describing the read failure; inspected by callers
        # (e.g. readuff re-wraps dataset errors using dataset_obj.value).
        self.value = value

    def __str__(self):
        return repr(self.value)
# Maps a UFF format-specifier type letter to the callable used to convert a
# raw character field into a Python value when reading:
#   A: alphanumeric -> str, I: integer -> int, E/D: floating point -> float,
#   X: skip field (entry present so the letter is recognized; parse_uff_line
#      never calls it because 'X' fields are skipped before lookup)
read_type_functions = {'A': str,
                       'I': int,
                       'X': str,
                       'E': float,
                       'D': float}
# Maps a type letter to a format-string template used when writing.  The
# outer (escaped) braces survive the first .format() call, which substitutes
# the width/precision part of the specifier: e.g. 'I10' -> '{:>10d}'.
# 'A' fields are left-justified; all others right-justified; 'X' produces
# blank padding of the given width.
write_type_format_operations = {'A': '{{:<{:}s}}',
                                'I': '{{:>{:}d}}',
                                'X': '{{:>{:}s}}',
                                'E': '{{:>{:}E}}',
                                'D': '{{:>{:}e}}'
                                }
def parse_uff_line(line, format_specs, read_number=None):
    """
    Parses a line from a universal file format

    Parameters
    ----------
    line : str
        A line from a unv file
    format_specs : iterable
        The format specifiers for the line (e.g. ``'I10'``, ``'E13.5'``,
        ``'A80'``, ``'X5'``) that determine how the string is transformed
        into data.  The leading letter selects the type and the following
        integer is the field width in characters.
    read_number : int, optional
        The number of entries to read.  Will repeat the format specifiers if
        necessary.

    Raises
    ------
    UFFReadError
        Raised if an error occurs reading the file.

    Returns
    -------
    output : list
        Data from this line of the unv file.  Blank fields are returned as
        ``None``; 'X' (padding) fields produce no entry at all.
    """
    # Accept any iterable of specifiers; the tiling below needs a real list.
    format_specs = list(format_specs)
    if read_number is None:
        read_number = len(format_specs)
    else:
        # Tile the specifier list until it holds at least read_number
        # entries, then truncate to exactly read_number.
        copies = int((read_number - 1) // len(format_specs) + 1)
        format_specs = (copies * format_specs)[:read_number]
    position = 0
    output = []
    for spec in format_specs:
        spec_type = spec[0].upper()
        # Field width is the integer before any '.' (e.g. 'E13.5' -> 13)
        spec_length = int(spec[1:].split('.')[0])
        if spec_type == 'X':
            # Padding field: skip the characters without emitting a value
            position += spec_length
            continue
        try:
            type_function = read_type_functions[spec_type]
        except KeyError:
            # `from None` keeps the traceback focused on the UFFReadError
            # instead of chaining through the internal KeyError.
            raise UFFReadError('Invalid Type {:}, should be one of {:}'.format(
                spec_type, list(read_type_functions))) from None
        position_string = line[position:position + spec_length].rstrip()
        if position_string == '':
            # A blank field means missing data
            output.append(None)
        else:
            try:
                output.append(type_function(position_string))
            except ValueError:
                raise UFFReadError('Line "{:}"\n characters "{:}" cannot be transformed to type {:}'.format(
                    line, position_string, spec_type)) from None
        position += spec_length
    return output
def parse_uff_lines(lines, line_format_spec, read_number):
    """
    Reads multiple lines from a universal file

    Parameters
    ----------
    lines : iterable
        List of lines to read
    line_format_spec : iterable
        The format specifiers for the line that determines how the string is
        transformed into data
    read_number : int
        The number of entries to read

    Returns
    -------
    output
        Data from the universal file over the specified lines.
    lines_read : int
        Number of lines read from the universal file

    """
    # Number of lines filled completely by the spec, plus a possible
    # partial line holding the leftover entries.
    complete, leftover = divmod(read_number, len(line_format_spec))
    output = []
    for this_line in lines[:complete]:
        output.extend(parse_uff_line(this_line, line_format_spec))
    lines_read = complete
    if leftover:
        # The final line only carries `leftover` entries
        output.extend(parse_uff_line(lines[complete], line_format_spec, leftover))
        lines_read += 1
    return output, lines_read
def write_uff_line(data, format_specs, fill_line=True):
    """
    Write data to universal file format

    Parameters
    ----------
    data : iterable
        The data to write to the universal file.
    format_specs : iterable
        The format specification for each value in data
    fill_line : bool, optional
        Fill the line completely. The default is True.

    Returns
    -------
    line : str
        A string representation of the data in the universal file format

    """
    num_values = len(data)
    # Only specifiers that consume a data value count toward the number of
    # spec-list repetitions needed ('X' fields are pure padding).
    value_specs = [spec for spec in format_specs if 'X' not in spec.upper()]
    repeats = int((num_values - 1) // len(value_specs) + 1)
    # Tile the specifier list, appending a newline marker after each pass.
    tiled_specs = repeats * (format_specs + ['\n'])
    pieces = []
    index = 0
    for spec in tiled_specs:
        if spec == '\n':
            pieces.append('\n')
            continue
        kind = spec[0].upper()
        width_spec = spec[1:]
        width = int(width_spec.split('.')[0])
        if kind == 'X':
            # Padding field: blank characters, no data value consumed
            field = (write_type_format_operations[kind].format(width_spec)).format('')
        else:
            value = data[index]
            index += 1
            if value is None:
                # Missing value: emit a blank field of the same width
                field = (write_type_format_operations['X'].format(width)).format('')
            else:
                field = (write_type_format_operations[kind].format(
                    width_spec)).format(value)
        if len(field) > width:
            print('Data to write {:} longer than specification length of {:}. Truncating!'.format(
                field, width))
            field = field[:width]
        pieces.append(field)
        if index == num_values:
            break
    line = ''.join(pieces)
    if fill_line:
        # Pad every physical line out to the traditional 80-column record
        line = '\n'.join('{:<80s}'.format(part) for part in line.split('\n')) + '\n'
    return line
196# To add a data set for reading, you must write it in a file defining a read
197# and write command, import it here, then add it to the dataset dictionary
198# using the dataset number as the key.
200from .sdynpy_uff_datasets import sdynpy_uff_dataset_55 as dataset_55 # noqa: E402
201from .sdynpy_uff_datasets import sdynpy_uff_dataset_58 as dataset_58 # noqa: E402
202from .sdynpy_uff_datasets import sdynpy_uff_dataset_82 as dataset_82 # noqa: E402
203from .sdynpy_uff_datasets import sdynpy_uff_dataset_151 as dataset_151 # noqa: E402
204from .sdynpy_uff_datasets import sdynpy_uff_dataset_164 as dataset_164 # noqa: E402
205from .sdynpy_uff_datasets import sdynpy_uff_dataset_1858 as dataset_1858 # noqa: E402
206from .sdynpy_uff_datasets import sdynpy_uff_dataset_2400 as dataset_2400 # noqa: E402
207from .sdynpy_uff_datasets import sdynpy_uff_dataset_2411 as dataset_2411 # noqa: E402
208from .sdynpy_uff_datasets import sdynpy_uff_dataset_2412 as dataset_2412 # noqa: E402
209from .sdynpy_uff_datasets import sdynpy_uff_dataset_2420 as dataset_2420 # noqa: E402
# Registry mapping a universal file dataset number to the module that
# implements its read/write functions.  readuff looks up `.read` on these
# modules; to support a new dataset, import its module above and add it here
# keyed by the dataset number.
dataset_dict = {55: dataset_55,
                58: dataset_58,
                82: dataset_82,
                151: dataset_151,
                164: dataset_164,
                1858: dataset_1858,
                2400: dataset_2400,
                2411: dataset_2411,
                2412: dataset_2412,
                2420: dataset_2420}
def readuff(filename, datasets=None, verbose=False):
    """
    Read a universal file

    Parameters
    ----------
    filename : str
        Path to the file that should be read.
    datasets : iterable, optional
        List of dataset id numbers to read. The default is None, which reads
        all implemented datasets.
    verbose : bool, optional
        Output extra information when reading the file. The default is False.

    Raises
    ------
    UFFReadError
        Raised if errors are found when reading the file.

    Returns
    -------
    dict
        Dictionary with keys as the dataset id numbers and values containing
        lists of the data from the universal file in those datasets.
    """
    return_dict = {}
    # Open in binary mode: some datasets (e.g. 58b) carry raw binary payloads
    # interleaved with the ASCII records, so all comparisons use bytes.
    with open(filename, 'rb') as f:
        line = b'\n'
        line_num = 0
        dataset_line_num = 0
        # Loop through the file until it is at its end
        while line != b'':
            # Find the first delimiter line
            # Here we want to find a line that has -1 in the 5th and 6th column,
            # and make sure that -1 isn't the only thing in the line to make sure
            # that any comments at the start of the file don't accidentally line up
            while not line[4:6] == b'-1' and not line.strip() == b'-1':
                line = f.readline()
                line_num += 1
                if line == b'':
                    break
            if line == b'':
                break
            dataset_line_num = line_num
            # Load in the dataset specifier
            line = f.readline()
            line_num += 1
            try:
                # Make sure that we can convert it to an integer
                (dataset, binary_marker, byte_ordering, floating_point_format,
                 num_ascii_lines_following, num_bytes_following, *not_used) = parse_uff_line(
                    line.decode(), ['I6', 'A1', 'I6', 'I6', 'I12', 'I12', 'I6', 'I6', 'I12', 'I12'])
                # A 'b' marker after the dataset number indicates binary data
                is_binary = binary_marker is not None
                if is_binary:
                    # Apply the format's documented defaults for omitted fields
                    if byte_ordering is None:
                        byte_ordering = 1
                    if num_ascii_lines_following is None:
                        num_ascii_lines_following = 11
            except UFFReadError:
                raise UFFReadError(
                    'Improperly formatted dataset specification at line {}, {}'.format(line_num, line)) from None
            if verbose:
                print('Reading Dataset {:} at line {:}'.format(dataset, dataset_line_num))
            # Load in the dataset information
            line = f.readline()
            line_num += 1
            data = []
            # Accumulate raw lines until the closing '    -1' delimiter.  For
            # binary datasets the delimiter can trail binary data on the same
            # line, so also check the end of the (stripped) line, but only
            # after the ASCII header lines have been passed.
            while (not (not is_binary and line[4:6] == b'-1' and line.strip() == b'-1')
                   and
                   not (is_binary and line.rstrip()[-6:] == b'    -1' and line_num - dataset_line_num - 1 > num_ascii_lines_following)):
                data.append(line)
                line = f.readline()
                line_num += 1
                # BUG FIX: this previously compared the bytes line against the
                # str '' (always False), so a truncated file looped forever
                # instead of raising the intended error.
                if line == b'':
                    raise UFFReadError(
                        'File ended before dataset starting at line {} was ended.'.format(dataset_line_num))
            if is_binary and line.rstrip()[-6:] == b'    -1' and line_num - dataset_line_num - 1 > num_ascii_lines_following:
                # Keep the binary payload preceding the inline delimiter
                data.append(line.rstrip()[:-6])
            try:
                read_fn = dataset_dict[dataset].read
            except KeyError:
                print('Dataset {} at line {} is not implemented, skipping...'.format(
                    dataset, dataset_line_num))
                # Read the next line in preparation for the next loop of the script
                line = f.readline()
                line_num += 1
                continue
            except AttributeError:
                print('Dataset {} at line {} read function is not implemented, skipping...'.format(
                    dataset, dataset_line_num))
                # Read the next line in preparation for the next loop of the script
                line = f.readline()
                line_num += 1
                continue
            if datasets is not None and dataset not in datasets:
                print('Skipping dataset {} at line {} due to it not being specified in the `datasets` input argument'.format(
                    dataset, dataset_line_num))
                line = f.readline()
                line_num += 1
                continue
            if is_binary:
                dataset_obj = read_fn(data, is_binary, byte_ordering,
                                      floating_point_format, num_ascii_lines_following,
                                      num_bytes_following)
            else:
                dataset_obj = read_fn(data)
            # Dataset readers signal failure by returning the error object
            if isinstance(dataset_obj, UFFReadError):
                raise UFFReadError('In dataset starting at line {}, {}'.format(
                    dataset_line_num, dataset_obj.value))
            # Collect one list of dataset objects per dataset number
            return_dict.setdefault(dataset, []).append(dataset_obj)
            # Read the next line in preparation for the next loop of the script
            line = f.readline()
            line_num += 1
    return return_dict
344readunv = readuff