Coverage for src / sdynpy / fileio / sdynpy_vic.py: 10%

167 statements  

« prev     ^ index     » next       coverage.py v7.13.4, created at 2026-03-11 16:22 +0000

1# -*- coding: utf-8 -*- 

2""" 

3Functions for working with camera output data from VIC3D 

4""" 

5""" 

6Copyright 2022 National Technology & Engineering Solutions of Sandia, 

7LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. 

8Government retains certain rights in this software. 

9 

10This program is free software: you can redistribute it and/or modify 

11it under the terms of the GNU General Public License as published by 

12the Free Software Foundation, either version 3 of the License, or 

13(at your option) any later version. 

14 

15This program is distributed in the hope that it will be useful, 

16but WITHOUT ANY WARRANTY; without even the implied warranty of 

17MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

18GNU General Public License for more details. 

19 

20You should have received a copy of the GNU General Public License 

21along with this program. If not, see <https://www.gnu.org/licenses/>. 

22""" 

23 

24import zipfile 

25import xml.etree.ElementTree as et 

26import numpy as np 

27from scipy.io import loadmat 

28from ..core.sdynpy_coordinate import coordinate_array 

29from ..core.sdynpy_data import data_array, FunctionTypes 

30from ..core.sdynpy_geometry import Geometry, node_array, coordinate_system_array 

31from time import time 

32 

33REPORT_TIME = 5 

34 

35 

def extract_vic_cal_parameters(z3d_file):
    """
    Extract stereo camera calibration data from a VIC3D .z3d project file.

    Parameters
    ----------
    z3d_file : str
        Path to the VIC3D .z3d project file.

    Returns
    -------
    out_data : dict
        Dictionary with keys 'intrinsics_0', 'extrinsics_0', 'distortion_0',
        'intrinsics_1', 'extrinsics_1', and 'distortion_1' holding the
        intrinsic, extrinsic, and distortion parameter lists for the left
        camera (0) and right camera (1).

    """
    # A .z3d project file is a zip archive; the calibration information
    # lives in the project.xml member.
    with zipfile.ZipFile(z3d_file, 'r') as archive:
        with archive.open('project.xml') as xml_handle:
            xml_text = ''.join(raw_line.decode() for raw_line in xml_handle.readlines())

    root = et.fromstring(xml_text)
    cameras = root.find('calibration').findall('camera')

    out_data = {}
    # The two cameras are distinguished by their 'id' attribute.
    for camera_id in ('0', '1'):
        camera = [cam for cam in cameras if cam.attrib['id'] == camera_id][0]
        # Each parameter set is a space-separated list of floats in an
        # XML element; 'orientation' holds the extrinsic parameters.
        for tag, key in (('intrinsics', 'intrinsics'),
                         ('orientation', 'extrinsics'),
                         ('distortion', 'distortion')):
            values = [float(entry) for entry in camera.find(tag).text.split(' ')]
            out_data['{:}_{:}'.format(key, camera_id)] = values

    return out_data

81 

82 

def vic_angles_from_matrix(R):
    '''Extract Bryant angles from a rotation matrix.

    Recovers the rotation angles r_x, r_y, r_z for the x,y,z rotation
    sequence used by VIC3D.

    Parameters
    ----------
    R : np.ndarray
        A 3x3 array representing a rotation matrix.

    Returns
    -------
    rx : float
        Rotation angle about X in radians
    ry : float
        Rotation angle about Y in radians
    rz : float
        Rotation angle about Z in radians
    '''
    sin_y = R[0, 2]
    if abs(sin_y) < 0.9999:
        # General case: recover ry first, then divide out cos(ry) to
        # isolate the x and z rotations.
        ry = -np.arcsin(sin_y)
        cos_y = np.cos(ry)
        rz = np.arctan2(R[0, 1] / cos_y, R[0, 0] / cos_y)
        rx = np.arctan2(R[1, 2] / cos_y, R[2, 2] / cos_y)
    else:
        # Near gimbal lock (|sin(ry)| ~ 1): rx and rz are coupled, so
        # fix rz to zero and fold the remaining rotation into rx.
        ry = -np.pi / 2 if sin_y > 0 else np.pi / 2
        rz = 0.0
        rx = np.arctan2(-R[2, 1], R[1, 1])
    return rx, ry, rz

116 

117 

def get_vic_camera_parameters(K, RT):
    '''Computes VIC3D camera information for the project.xml file.

    Converts a camera intrinsic matrix and extrinsic matrix into the
    parameter lists VIC3D stores in the project.xml file inside an
    unzipped Z3D project.

    Parameters
    ----------
    K : ndarray
        A 3 x 3 upper triangular array consisting of the camera intrinsic
        parameters.
    RT : ndarray
        A 3 x 4 array where the first three columns are an orthogonal matrix
        denoting a rotation matrix, and the last column is a translation.

    Returns
    -------
    intrinsics : iterable
        A list of parameters that define the camera intrinsic properties,
        ordered cx, cy, fx, fy, s.
    extrinsics : iterable
        A list of parameters that define the camera extrinsic properties,
        ordered rx, ry, rz, tx, ty, tz.  Angles are in degrees and
        translations are scaled by 1000 (meters to millimeters).
    '''
    # The Bryant angles are extracted from the transpose of the rotation
    # portion of the extrinsic matrix.
    rx, ry, rz = vic_angles_from_matrix(RT[:3, :3].T)
    tx, ty, tz = RT[:3, 3]
    # VIC3D intrinsic order: principal point, focal lengths, skew.
    intrinsics = [K[0, 2], K[1, 2], K[0, 0], K[1, 1], K[0, 1]]
    # Angles go out in degrees, translations in millimeters.
    extrinsics = [angle * 180 / np.pi for angle in (rx, ry, rz)]
    extrinsics += [distance * 1000 for distance in (tx, ty, tz)]
    return intrinsics, extrinsics

156 

157 

def matrix_from_bryant_angles(rx, ry, rz):
    '''Computes a rotation matrix from Bryant angles.

    Builds the rotation matrix for the x,y,z rotation sequence as the
    product of the three elementary rotations.

    Parameters
    ----------
    rx : float
        Rotation angle about X in radians
    ry : float
        Rotation angle about Y in radians
    rz : float
        Rotation angle about Z in radians

    Returns
    -------
    R : np.ndarray
        A 3x3 array representing a rotation matrix.
    '''
    cx, sx = np.cos(rx), np.sin(rx)
    cy, sy = np.cos(ry), np.sin(ry)
    cz, sz = np.cos(rz), np.sin(rz)
    # Elementary rotations about the X, Y, and Z axes.
    rot_x = np.array([[1, 0, 0],
                      [0, cx, sx],
                      [0, -sx, cx]])
    rot_y = np.array([[cy, 0, -sy],
                      [0, 1, 0],
                      [sy, 0, cy]])
    rot_z = np.array([[cz, sz, 0],
                      [-sz, cz, 0],
                      [0, 0, 1]])
    # Compose in the x-then-y-then-z sequence.
    return rot_x @ rot_y @ rot_z

186 

187 

def camera_matrix_from_vic_parameters(intrinsics, extrinsics=None):
    '''Computes K and RT camera matrices from project.xml information.

    Takes the VIC3D camera calibration parameter lists (as stored in the
    project.xml file inside an unzipped Z3D project) and constructs the
    camera intrinsic matrix and, optionally, the extrinsic matrix.

    Parameters
    ----------
    intrinsics : iterable
        A list of parameters that define the camera intrinsic properties,
        ordered cx, cy, fx, fy, s.
    extrinsics : iterable, optional
        A list of parameters that define the camera extrinsic properties,
        ordered rx, ry, rz, tx, ty, tz.  If not given, only K is returned.

    Returns
    -------
    K : ndarray
        A 3 x 3 upper triangular array consisting of the camera intrinsic
        parameters.
    RT : ndarray
        A 3 x 4 array where the first three columns are an orthogonal matrix
        denoting a rotation matrix, and the last column is a translation.
        Only returned when extrinsics is provided.
    '''
    cx = intrinsics[0]
    cy = intrinsics[1]
    fx = intrinsics[2]
    fy = intrinsics[3]
    skew = intrinsics[4]
    K = np.array([[fx, skew, cx],
                  [0, fy, cy],
                  [0, 0, 1]])
    if extrinsics is None:
        return K
    # The stored angles are in degrees and translations in millimeters;
    # convert back to radians and meters.
    angles_rad = [value * np.pi / 180 for value in extrinsics[:3]]
    translation = np.array([value / 1000 for value in extrinsics[3:]])[:, np.newaxis]
    # The extrinsic rotation is the transpose of the matrix built from
    # the stored Bryant angles.
    camera_rotation = matrix_from_bryant_angles(*angles_rad).T
    RT = np.concatenate((camera_rotation, translation), axis=1)
    return K, RT

225 

226 

def read_vic3D_mat_files(files, read_3D=True, read_2D=False, read_quality=False,
                         sigma_tol=0.0, element_triangulation_condition=3.0, dt=1.0,
                         element_color_order=None,
                         allow_dropouts=False):
    """
    Reads in data from Correlated Solutions' VIC3D

    Parameters
    ----------
    files : Iterable of str
        List of strings pointing to .mat files exported from VIC3D.
    read_3D : bool, optional
        Flag specifying whether or not to extract 3D information. The default
        is True.
    read_2D : bool, optional
        Flag specifying whether or not to extract 2D (pixel) information. The
        default is False.
    read_quality : bool, optional
        Flag specifying whether or not to output quality (sigma) information.
        Note that the quality value will be read regardless in order to discard
        bad subsets. This flag only specifies whether the values are returned
        to the user. The default is False.
    sigma_tol : float, optional
        Tolerance used to discard bad subsets. The default is 0.0.
    element_triangulation_condition : float, optional
        Maximum condition number for triangular elements generated from subset
        positions. The default is 3.0.
    dt : float, optional
        Time spacing to be used in the returned TimeHistoryArrays.
        The default is 1.0.
    element_color_order : Iterable, optional
        Specifies the color order used when creating elements from the various
        areas of interest in the VIC3D output. The first area of interest will
        have color specified by the first entry in the array. The array will
        loop around if more areas of interest exist than values in the array.
        The default is [1,3,5,7,9,11,13,15,2,4,6,8,10,12,14].
    allow_dropouts : bool, optional
        Specifies whether or not to allow data to drop out (True) or if the
        entire point is discarded if any dropouts are detected (False).
        Default is False.

    Returns
    -------
    geometry_3D : Geometry
        Geometry object consisting of the 3D node positions from the test.
        Only returned if read_3D is True
    time_data_3D : TimeHistoryArray
        3D Displacement data from the test. Only returned if read_3D is True.
    geometry_2D : Geometry
        Geometry object consisting of the 2D (pixel) node positions from the
        test. Only returned if read_2D is True
    time_data_2D : TimeHistoryArray
        2D Pixel Displacement data from the test. Only returned if read_2D is
        True.
    time_data_2D_disparity : TimeHistoryArray
        2D Pixel Displacement data from the test for the second camera. Adding
        this array to the original pixel positions will result in the pixel
        positions over time for the right image. Only returned if read_2D is
        True.
    sigma_data : TimeHistoryArray
        Data quality metric for the test over time. Only returned if
        read_quality is True

    """
    # Avoid a shared mutable default argument: None means "use the default
    # 15-color cycle".
    if element_color_order is None:
        element_color_order = [1, 3, 5, 7, 9, 11, 13, 15, 2, 4, 6, 8, 10, 12, 14]
    start_time = time()
    total_times = len(files)
    for ifile, file in enumerate(files):
        this_time = time()
        # Periodically report progress on long reads (every REPORT_TIME seconds)
        if this_time - start_time > REPORT_TIME:
            print('Reading File {:} of {:} ({:0.2f}%)'.format(
                ifile + 1, total_times, (ifile + 1) / total_times * 100))
            start_time += REPORT_TIME
        data = loadmat(file)
        # Set up storage on the first file: the variable names and area of
        # interest (AoI) layout are assumed identical across all files.
        if ifile == 0:
            # Each AoI appears in the .mat file as a suffix on the 'sigma'
            # variable name (e.g. 'sigma', 'sigma_0', ...).
            aois = sorted([key.replace('sigma', '') for key in data.keys() if 'sigma' in key])
            # Number of subsets (degrees of freedom) in each AoI
            ndofs = [np.prod(data['sigma' + aoi].shape) for aoi in aois]
            total_dofs = np.sum(ndofs)
            boundaries = np.cumsum(ndofs)
            boundaries = np.concatenate(([0], boundaries))
            # Slices into the stacked dof arrays, one per AoI
            aoi_slices = [slice(boundaries[i], boundaries[i + 1]) for i in range(len(ndofs))]
            # Preallocate storage: 1D arrays hold static per-node values
            # (positions), 2D arrays hold per-node, per-timestep values.
            variable_array = {}
            variable_array['sigma'] = np.empty((total_dofs, total_times))
            variable_array['x'] = np.empty(total_dofs)
            variable_array['y'] = np.empty(total_dofs)
            if read_3D:
                variable_array['X'] = np.empty((total_dofs))
                variable_array['Y'] = np.empty((total_dofs))
                variable_array['Z'] = np.empty((total_dofs))
                variable_array['U'] = np.empty((total_dofs, total_times))
                variable_array['V'] = np.empty((total_dofs, total_times))
                variable_array['W'] = np.empty((total_dofs, total_times))
            if read_2D:
                variable_array['u'] = np.empty((total_dofs, total_times))
                variable_array['v'] = np.empty((total_dofs, total_times))
                variable_array['q'] = np.empty((total_dofs, total_times))
                variable_array['r'] = np.empty((total_dofs, total_times))
        # Fill in this timestep's data for each tracked variable
        for name, array in variable_array.items():
            # Static (1D) variables only need to be read from the first file
            if array.ndim == 1 and ifile > 0:
                continue
            for iaoi, aoi in enumerate(aois):
                key_name = name + aoi
                key_data = data[key_name]
                # Index by (AoI slice,) for static arrays or
                # (AoI slice, timestep) for time-varying arrays
                array_index = (aoi_slices[iaoi],) + ((ifile,) if array.ndim > 1 else ())
                array[array_index] = key_data.flatten()
    # Track which AoI each dof originally belonged to so the AoI structure
    # can be rebuilt after bad dofs are discarded
    original_aoi_indices = np.empty(total_dofs)
    for i, aoi_slice in enumerate(aoi_slices):
        original_aoi_indices[aoi_slice] = i + 1
    # Reduce to just "good" dofs; sigma at or below sigma_tol marks a bad subset
    if allow_dropouts:
        # Only remove data if the first sample is bad
        good_dofs = variable_array['sigma'][..., 0] > sigma_tol
    else:
        # Remove data if any sample is bad
        good_dofs = np.all(variable_array['sigma'] > sigma_tol, axis=-1)
    for variable in variable_array:
        variable_array[variable] = variable_array[variable][good_dofs]
    original_aoi_indices = original_aoi_indices[good_dofs]
    # Recompute the AoI boundaries and slices for the reduced dof set
    boundaries = np.cumsum([np.count_nonzero(original_aoi_indices == i + 1)
                            for i in range(len(aois))])
    boundaries = np.concatenate(([0], boundaries))
    aoi_slices = [slice(boundaries[i], boundaries[i + 1]) for i in range(len(ndofs))]
    # Create node numbers for geometry: nodes in AoI i are numbered
    # (i+1)*10**node_length + 1, + 2, ... so the AoI is readable from the number
    num_nodes_per_aoi = np.diff(boundaries)
    node_length = max([len(str(num)) for num in num_nodes_per_aoi])
    node_numbers = np.empty(boundaries[-1])
    for i, (num_nodes, aoi_slice) in enumerate(zip(num_nodes_per_aoi, aoi_slices)):
        node_numbers[aoi_slice] = np.arange(num_nodes) + (i + 1) * 10**node_length + 1
    # Common abscissa for all returned time histories.  Defined up front so
    # it exists even for read_quality-only calls; previously it was only
    # assigned inside the read_3D/read_2D branches, which raised NameError
    # when both of those flags were False and read_quality was True.
    timesteps = np.arange(total_times) * dt
    # Start accumulating output variables
    output_variables = []
    # Create the 2D geometry regardless of read_2D; it is used for element
    # triangulation of the 3D geometry as well
    node_positions = np.array([variable_array[val] for val in 'xy'])
    node_positions = np.concatenate(
        [node_positions, np.zeros((1, node_positions.shape[-1]))], axis=0)
    geometry_2D = Geometry(
        node_array(node_numbers, node_positions.T),
        coordinate_system_array(1))
    # Triangulate each AoI separately so elements never span AoIs.
    # NOTE(review): 'size > 3' skips AoIs with exactly 3 nodes, which could
    # still form one triangle -- confirm this is intentional.
    element_arrays = [geometry_2D.node[aoi_slice].triangulate(
        geometry_2D.coordinate_system, condition_threshold=element_triangulation_condition) for aoi_slice in aoi_slices
        if geometry_2D.node[aoi_slice].size > 3]
    # Color each AoI's elements, cycling through element_color_order
    for i in range(len(element_arrays)):
        element_arrays[i].color = element_color_order[i % len(element_color_order)]
    geometry_2D.element = np.concatenate(element_arrays)
    if read_3D:
        # Create the 3D geometry, reusing the 2D triangulation
        node_positions = np.array([variable_array[val] for val in 'XYZ'])
        geometry_3D = Geometry(
            node_array(node_numbers, node_positions.T),
            coordinate_system_array(1))
        geometry_3D.element = np.concatenate(element_arrays)
        output_variables.append(geometry_3D)
        # Create the 3D displacement time history (U, V, W -> dofs 1, 2, 3)
        node_displacements = np.array([variable_array[val]
                                       for val in 'UVW']).reshape(-1, total_times)
        node_dofs = coordinate_array(node_numbers, np.array([1, 2, 3])[:, np.newaxis]).flatten()
        time_data_3D = data_array(FunctionTypes.TIME_RESPONSE, timesteps, node_displacements,
                                  node_dofs[:, np.newaxis])
        output_variables.append(time_data_3D)
    if read_2D:
        output_variables.append(geometry_2D)
        # Create the pixel displacement time history (u, v -> dofs 1, 2)
        node_displacements = np.array([variable_array[val]
                                       for val in 'uv']).reshape(-1, total_times)
        node_dofs = coordinate_array(node_numbers, np.array([1, 2])[:, np.newaxis]).flatten()
        time_data_2D = data_array(FunctionTypes.TIME_RESPONSE, timesteps, node_displacements,
                                  node_dofs[:, np.newaxis])
        output_variables.append(time_data_2D)
        # Create the disparity time history for the second camera (q, r)
        node_displacements = np.array([variable_array[val]
                                       for val in 'qr']).reshape(-1, total_times)
        node_dofs = coordinate_array(node_numbers, np.array([1, 2])[:, np.newaxis]).flatten()
        time_data_2D_disparity = data_array(FunctionTypes.TIME_RESPONSE, timesteps, node_displacements,
                                            node_dofs[:, np.newaxis])
        output_variables.append(time_data_2D_disparity)
    if read_quality:
        # Return the raw sigma values as a time history on dof 0
        sigma_data = variable_array['sigma']
        sigma_dofs = coordinate_array(node_numbers, 0)
        time_data = data_array(FunctionTypes.TIME_RESPONSE, timesteps, sigma_data,
                               sigma_dofs[:, np.newaxis])
        output_variables.append(time_data)
    return output_variables