Coverage for  / opt / hostedtoolcache / Python / 3.11.14 / x64 / lib / python3.11 / site-packages / rattlesnake / components / utilities.py: 55%

378 statements  

« prev     ^ index     » next       coverage.py v7.13.4, created at 2026-02-27 18:22 +0000

1# -*- coding: utf-8 -*- 

2""" 

3This file contains a number of helper classes for the general controller. 

4 

5Rattlesnake Vibration Control Software 

6Copyright (C) 2021 National Technology & Engineering Solutions of Sandia, LLC 

7(NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. 

8Government retains certain rights in this software. 

9 

10This program is free software: you can redistribute it and/or modify 

11it under the terms of the GNU General Public License as published by 

12the Free Software Foundation, either version 3 of the License, or 

13(at your option) any later version. 

14 

15This program is distributed in the hope that it will be useful, 

16but WITHOUT ANY WARRANTY; without even the implied warranty of 

17MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

18GNU General Public License for more details. 

19 

20You should have received a copy of the GNU General Public License 

21along with this program. If not, see <https://www.gnu.org/licenses/>. 

22""" 

23 

24import importlib.util 

25import multiprocessing as mp 

26import multiprocessing.queues as mp_queues 

27import os 

28import random 

29import string 

30import time 

31from datetime import datetime 

32from enum import Enum 

33from typing import Dict, List, Tuple 

34 

35import numpy as np 

36import scipy.signal as sig 

37from qtpy import QtWidgets 

38 

39 

def log_file_task(queue: mp.queues.Queue):
    """Multiprocessing task that drains a queue and writes log messages to file.

    Messages are appended to ``Rattlesnake.log`` in the working directory.
    Sending the string ``"quit"`` terminates the task.

    Parameters
    ----------
    queue : mp.queues.Queue
        The multiprocessing queue to collect logging messages from
    """
    with open("Rattlesnake.log", "w", encoding="utf-8") as log_file:
        while True:
            message = queue.get()
            if message == "quit":
                log_file.write("Program quitting, logging terminated.")
                return
            # Keep each log entry on a single line in the file: collapse all
            # interior newlines, preserving only the final one.
            newline_count = message.count("\n")
            if newline_count > 1:
                message = message.replace("\n", "////", newline_count - 1)
            log_file.write(message)
            # Flush after every message so the log survives a hard crash.
            log_file.flush()

61 

62 

def coherence(cpsd_matrix: np.ndarray, row_column: Tuple[int] = None):
    """Compute (ordinary) coherence from a CPSD matrix.

    Parameters
    ----------
    cpsd_matrix : np.ndarray
        3D complex array where the first index is the frequency line and the
        last two indices are the rows and columns of the matrix.
    row_column : Tuple[int], optional
        If given as ``(row, column)``, the coherence is computed only for
        that single entry.  By default the entire matrix is computed.

    Returns
    -------
    coherence : np.ndarray
        If ``row_column`` is None, a 3D array where entry ``[i, j, k]`` is
        the coherence at frequency line ``i`` between channels ``j`` and
        ``k``; otherwise a 1D array over frequency lines for the requested
        entry.
    """
    if row_column is not None:
        row, column = row_column
        numerator = np.abs(cpsd_matrix[:, row, column]) ** 2
        denominator = cpsd_matrix[:, row, row] * cpsd_matrix[:, column, column]
        return np.real(numerator / denominator)
    # Pull the autospectra off the diagonal, then broadcast the outer
    # product of autospectra across the full matrix.
    autospectra = np.einsum("ijj->ij", cpsd_matrix)
    denominator = autospectra[:, :, np.newaxis] * autospectra[:, np.newaxis, :]
    return np.real(np.abs(cpsd_matrix) ** 2 / denominator)

93 

94 

class Channel:
    """Property container for a single channel in the controller."""

    def __init__(
        self,
        node_number,
        node_direction,
        comment,
        serial_number,
        triax_dof,
        sensitivity,
        unit,
        make,
        model,
        expiration,
        physical_device,
        physical_channel,
        channel_type,
        minimum_value,
        maximum_value,
        coupling,
        excitation_source,
        excitation,
        feedback_device,
        feedback_channel,
        warning_level,
        abort_level,
    ):
        """Stores the metadata and hardware configuration for one channel.

        Parameters
        ----------
        node_number : str
            Metadata specifying the node number
        node_direction : str
            Metadata specifying the direction at a node
        comment : str
            Metadata specifying any additional comments on the channel
        serial_number : str
            Metadata specifying the serial number of the instrument
        triax_dof : str
            Metadata specifying the degree of freedom on a triaxial sensor
        sensitivity : str
            Sensitivity value of the sensor in mV/engineering unit
        unit : str
            The engineering unit of the sensor
        make : str
            Metadata specifying the make of the sensor
        model : str
            Metadata specifying the model of the sensor
        expiration : str
            Metadata specifying the expiration date of the sensor
        physical_device : str
            Physical hardware that the instrument is connected to
        physical_channel : str
            Channel in the physical hardware that the instrument is connected to
        channel_type : str
            Type of channel
        minimum_value : str
            Minimum value of the channel in volts
        maximum_value : str
            Maximum value of the channel in volts
        coupling : str
            Coupling type for the channel
        excitation_source : str
            Source for the signal conditioning for the sensor
        excitation : str
            Level of excitation for the signal conditioning for the sensor
        feedback_device : str
            Physical hardware that the source output teed into this channel
            originates from
        feedback_channel : str
            Channel on the physical hardware that is teed into this channel
        warning_level : str
            Level at which warnings will be flagged on the monitor
        abort_level : str
            Level at which the system will shut down
        """
        # Every constructor argument is stored verbatim under the same name.
        for attribute, value in (
            ("node_number", node_number),
            ("node_direction", node_direction),
            ("comment", comment),
            ("serial_number", serial_number),
            ("triax_dof", triax_dof),
            ("sensitivity", sensitivity),
            ("unit", unit),
            ("make", make),
            ("model", model),
            ("expiration", expiration),
            ("physical_device", physical_device),
            ("physical_channel", physical_channel),
            ("channel_type", channel_type),
            ("minimum_value", minimum_value),
            ("maximum_value", maximum_value),
            ("coupling", coupling),
            ("excitation_source", excitation_source),
            ("excitation", excitation),
            ("feedback_device", feedback_device),
            ("feedback_channel", feedback_channel),
            ("warning_level", warning_level),
            ("abort_level", abort_level),
        ):
            setattr(self, attribute, value)

    @classmethod
    def from_channel_table_row(cls, row: Tuple[str]):
        """Creates a Channel object from a row in the channel table.

        Parameters
        ----------
        row : iterable
            Iterable of strings from a single row of the channel table.

        Returns
        -------
        channel : Channel or None
            A Channel built from the row's data, or None if the row defines
            no physical device (i.e. the row is not an active channel).
        """
        # Blank cells are normalized to None.
        values = [None if cell.strip() == "" else cell for cell in row]
        # Column 10 is the physical device; without one the row is inactive.
        if values[10] is None:
            return None
        # Table columns 0-21 are in the same order as the __init__ arguments.
        return cls(
            values[0],
            values[1],
            values[2],
            values[3],
            values[4],
            values[5],
            values[6],
            values[7],
            values[8],
            values[9],
            values[10],
            values[11],
            values[12],
            values[13],
            values[14],
            values[15],
            values[16],
            values[17],
            values[18],
            values[19],
            values[20],
            values[21],
        )

262 

263 

class DataAcquisitionParameters:
    """Container to hold the global data acquisition parameters of the controller"""

    def __init__(
        self,
        channel_list: List[Channel],
        sample_rate,
        samples_per_read,
        samples_per_write,
        hardware,
        hardware_file,
        environment_names,
        environment_bools,
        output_oversample,
        **extra_parameters,
    ):
        """Stores the acquisition settings shared by all controller tasks.

        Parameters
        ----------
        channel_list : List[Channel]
            Channel objects for each channel in the controller.
        sample_rate : int
            Samples per second that the data acquisition runs at.
        samples_per_read : int
            Samples the data acquisition acquires with each read.  Smaller
            values give finer start/stop resolution for environments at
            higher computational cost.
        samples_per_write : int
            Samples the data acquisition outputs with each write; same
            trade-off as ``samples_per_read``.
        hardware : int
            Hardware index corresponding to the QCombobox selector on the
            Channel Table tab of the main controller.
        hardware_file : str
            Optional file completing the hardware specification, for
            example, finite element model results.
        environment_names : List[str]
            Names of the environments in the controller.
        environment_bools : np.ndarray
            2D boolean array; entry [i, j] is True when the ith channel is
            active in the jth environment.
        output_oversample : int
            Oversample factor of the output generator.
        **extra_parameters :
            Any additional keyword parameters, stored as a dictionary.
        """
        # Signal timing parameters
        self.sample_rate = sample_rate
        self.output_oversample = output_oversample
        self.samples_per_read = samples_per_read
        self.samples_per_write = samples_per_write
        # Hardware specification
        self.hardware = hardware
        self.hardware_file = hardware_file
        self.channel_list = channel_list
        # Environment bookkeeping; note the stored attribute name differs
        # from the constructor parameter name.
        self.environment_names = environment_names
        self.environment_active_channels = environment_bools
        self.extra_parameters = extra_parameters

    @property
    def nyquist_frequency(self):
        """Highest representable frequency: half the acquisition sample rate."""
        return self.sample_rate / 2

    @property
    def output_sample_rate(self):
        """Sample rate of the generated output (acquisition rate times oversample)."""
        return self.sample_rate * self.output_oversample

336 

337 

def error_message_qt(title, message):
    """Display a modal critical-error dialog to the user.

    Parameters
    ----------
    title : str
        Title of the window that the error message will appear in.
    message : str
        Error message that will be displayed.
    """
    # A None parent makes the dialog free-standing (no owner window).
    show_critical = QtWidgets.QMessageBox.critical
    show_critical(None, title, message)

350 

351 

class VerboseMessageQueue:
    """A multiprocessing queue wrapper that logs its put/get/flush operations."""

    def __init__(self, log_queue, queue_name):
        """Wraps an mp.Queue so operations are reported to a logging queue.

        Parameters
        ----------
        log_queue : mp.queues.Queue
            Queue read by the logging task; this queue's operations are
            reported there.
        queue_name : str
            Name identifying this queue in the log messages.
        """
        self.queue = mp.Queue()
        self.log_queue = log_queue
        self.queue_name = queue_name
        # Track the previous put so repeats of the same message within
        # time_threshold seconds are not logged again.
        self.last_put_message = None
        self.last_put_time = -float("inf")
        self.last_flush = -float("inf")
        self.time_threshold = 1.0

    def generate_message_id(self, size=6, chars=string.ascii_letters + string.digits):
        """Generate a random alphanumeric identifier for log file messages."""
        return "".join([random.choice(chars) for _ in range(size)])

    def put(self, task_name, message_data_tuple, *args, **kwargs):
        """Puts a (message, data) tuple to the queue, logging the operation.

        Parameters
        ----------
        task_name : str
            Name of the task performing the put operation.
        message_data_tuple : Tuple
            A (message, data) tuple where message is the instruction (an
            enum value with a ``name`` attribute) and data is any optional
            payload for that instruction.
        *args :
            Forwarded to ``mp.queues.Queue.put``.
        **kwargs :
            Forwarded to ``mp.queues.Queue.put``.
        """
        now = time.time()
        message = message_data_tuple[0]
        # A put is a "repeat" when the same message arrives within the
        # threshold window; repeats are queued but not logged.
        is_repeat = (
            message == self.last_put_message
            and now - self.last_put_time <= self.time_threshold
        )
        if is_repeat:
            message_id = ""
        else:
            message_id = self.generate_message_id(8)
            self.log_queue.put(
                f"{datetime.now()}: {task_name} put "
                f"{message.name} ({message_id}) to {self.queue_name}\n"
            )
            self.last_put_message = message
            self.last_put_time = now
        self.queue.put((message_id, message_data_tuple), *args, **kwargs)

    def get(self, task_name, *args, **kwargs):
        """Gets a (message, data) tuple from the queue, logging the operation.

        Parameters
        ----------
        task_name : str
            Name of the task retrieving data from the queue.
        *args :
            Forwarded to ``mp.queues.Queue.get``.
        **kwargs :
            Forwarded to ``mp.queues.Queue.get``.

        Returns
        -------
        message_data_tuple :
            The (message, data) tuple that was put to the queue.
        """
        message_id, message_data_tuple = self.queue.get(*args, **kwargs)
        get_time = datetime.now()
        # An empty id means the matching put was not logged; skip logging.
        if message_id:
            self.log_queue.put(
                f"{get_time}: {task_name} got "
                f"{message_data_tuple[0].name} ({message_id}) from {self.queue_name}\n"
            )
        return message_data_tuple

    def flush(self, task_name):
        """Drains the queue, returning everything currently in it.

        After execution the queue should be empty barring race conditions.

        Parameters
        ----------
        task_name : str
            Name of the task flushing the queue.

        Returns
        -------
        data : list of message_data_tuples
            All (message, data) tuples that were in the queue.
        """
        flush_time = time.time()
        # Rate-limit flush log entries to one per 0.1 seconds.
        if flush_time - self.last_flush > 0.1:
            self.log_queue.put(f"{datetime.now()}: {task_name} flushed {self.queue_name}\n")
            self.last_flush = flush_time
        data = []
        while True:
            try:
                message_id, this_data = self.queue.get(False)
            except mp.queues.Empty:
                return data
            data.append(this_data)
            if message_id:
                self.log_queue.put(
                    f"{datetime.now()}: {task_name} got {data[-1][0].name} ("
                    f"{message_id if message_id != '' else 'put not logged'})"
                    f" from {self.queue_name} during flush\n"
                )

    def empty(self):
        """Return true if the queue is empty."""
        return self.queue.empty()

482 

483 

class QueueContainer:
    """Gathers every queue the controller manages into one namespace."""

    def __init__(
        self,
        controller_communication_queue: VerboseMessageQueue,
        acquisition_command_queue: VerboseMessageQueue,
        output_command_queue: VerboseMessageQueue,
        streaming_command_queue: VerboseMessageQueue,
        log_file_queue: mp_queues.Queue,
        input_output_sync_queue: mp_queues.Queue,
        single_process_hardware_queue: mp_queues.Queue,
        gui_update_queue: mp_queues.Queue,
        environment_command_queues: Dict[str, VerboseMessageQueue],
        environment_data_in_queues: Dict[str, mp_queues.Queue],
        environment_data_out_queues: Dict[str, mp_queues.Queue],
    ):
        """Collects the controller's interprocess queues under one object.

        The controller uses many queues to pass data between the various
        pieces; this class organizes those queues into one common namespace.

        Parameters
        ----------
        controller_communication_queue : VerboseMessageQueue
            Read by the controller for global controller commands.
        acquisition_command_queue : VerboseMessageQueue
            Read by the acquisition subtask for acquisition commands.
        output_command_queue : VerboseMessageQueue
            Read by the output subtask for output commands.
        streaming_command_queue : VerboseMessageQueue
            Read by the streaming subtask for streaming commands.
        log_file_queue : mp_queues.Queue
            Receives logging messages for the logging subtask to write to file.
        input_output_sync_queue : mp_queues.Queue
            Used to synchronize input and output signals.
        single_process_hardware_queue : mp_queues.Queue
            Connects acquisition and output subtasks for hardware that
            cannot run them in separate processes.
        gui_update_queue : mp_queues.Queue
            Receives widget-update instructions for the user interface.
        environment_command_queues : Dict[str, VerboseMessageQueue]
            Per-environment command queues, keyed by environment name.
        environment_data_in_queues : Dict[str, mp_queues.Queue]
            Per-environment queues carrying acquired data to each environment.
        environment_data_out_queues : Dict[str, mp_queues.Queue]
            Per-environment queues carrying requested output data from each
            environment to the output subtask.
        """
        # Each queue is stored under an attribute of the same name.
        queue_map = {
            "controller_communication_queue": controller_communication_queue,
            "acquisition_command_queue": acquisition_command_queue,
            "output_command_queue": output_command_queue,
            "streaming_command_queue": streaming_command_queue,
            "log_file_queue": log_file_queue,
            "input_output_sync_queue": input_output_sync_queue,
            "single_process_hardware_queue": single_process_hardware_queue,
            "gui_update_queue": gui_update_queue,
            "environment_command_queues": environment_command_queues,
            "environment_data_in_queues": environment_data_in_queues,
            "environment_data_out_queues": environment_data_out_queues,
        }
        for attribute_name, queue_object in queue_map.items():
            setattr(self, attribute_name, queue_object)

555 

556 

def load_csv_matrix(file):
    """Load a 2D matrix of strings from a CSV file.

    Parameters
    ----------
    file : str
        Path to the file that will be loaded.

    Returns
    -------
    data : list[list[str]]
        A 2D nested list of whitespace-stripped strings, one inner list per
        line of the file.
    """
    with open(file, "r", encoding="utf-8") as handle:
        return [[entry.strip() for entry in line.split(",")] for line in handle]

579 

580 

def save_csv_matrix(data, file):
    """Save 2D matrix data to a file as comma-separated values.

    Parameters
    ----------
    data : 2D iterable of str
        A 2D nested iterable of strings that will be written to the file.
    file : str
        The path to a file where the data will be written.
    """
    rows = [",".join(row) for row in data]
    with open(file, "w", encoding="utf-8") as handle:
        handle.write("\n".join(rows))

595 

596 

def cpsd_to_time_history(cpsd_matrix, sample_rate, df, output_oversample=1):
    # pylint: disable=invalid-name
    """Generates a time history realization from a CPSD matrix

    Parameters
    ----------
    cpsd_matrix : np.ndarray :
        A 3D complex np.ndarray representing a CPSD matrix where the first
        dimension is the frequency line and the second two dimensions are the
        rows and columns of the matrix at each frequency line.
    sample_rate: float :
        The sample rate of the controller in samples per second
    df : float :
        The frequency spacing of the cpsd matrix
    output_oversample : int :
        Oversample factor applied to the generated signal by zero-padding
        the spectrum before the inverse FFT (Default value = 1)

    Returns
    -------
    output : np.ndarray :
        A numpy array containing the generated signals, with shape
        (channel, sample)

    Notes
    -----
    Uses the process described in [1]_.  The output is random: each call
    draws fresh noise from np.random.

    .. [1] R. Schultz and G. Nelson, "Input signal synthesis for open-loop
       multiple-input/multiple-output testing," Proceedings of the International
       Modal Analysis Conference, 2019.

    """
    # Compute SVD broadcasting over all frequency lines
    [U, S, Vh] = np.linalg.svd(cpsd_matrix, full_matrices=False)
    # Reform using the sqrt of the S matrix so Lsvd @ Lsvd^H recovers the CPSD
    Lsvd = U * np.sqrt(S[:, np.newaxis, :]) @ Vh
    # Compute Random Process: unit-variance complex Gaussian noise per
    # frequency line (0.5 splits the variance between real and imaginary parts)
    W = np.sqrt(0.5) * (
        np.random.randn(*cpsd_matrix.shape[:-1], 1)
        + 1j * np.random.randn(*cpsd_matrix.shape[:-1], 1)
    )
    # Color the noise by the matrix square root; 1/sqrt(df) converts from
    # power spectral density scaling to spectrum scaling
    Xv = 1 / np.sqrt(df) * Lsvd @ W
    # Ensure that the signal is real by setting the nyquist and DC component to 0
    Xv[[0, -1], :, :] = 0
    # Zero-pad the spectrum above Nyquist to oversample the output signal
    zero_padding = np.zeros(
        [(output_oversample - 1) * (Xv.shape[0] - 1)] + list(Xv.shape[1:]),
        dtype=Xv.dtype,
    )
    # Compute the IFFT, using the real version makes it so you don't need negative frequencies
    xv = (
        np.fft.irfft(np.concatenate((Xv, zero_padding), axis=0) / np.sqrt(2), axis=0)
        * output_oversample
        * sample_rate
    )
    # Drop the trailing singleton dimension and transpose to (channel, sample)
    output = xv[:, :, 0].T
    return output

651 

652 

def reduce_array_by_coordinate(
    array: np.ndarray,
    coordinate: np.ndarray,
    control_coordinate: np.ndarray,
    excitation_coordinate: np.ndarray = None,
):
    """Picks out entries in an array based on coordinate strings

    Parameters
    ----------
    array : np.ndarray
        Array to parse; the first axis is assumed to be the frequency/time
        ordinate axis
    coordinate : np.ndarray
        Coordinate names associated with the array.
        NOTE(review): this and the other coordinate arguments appear to be
        numpy structured arrays with (at least) a "direction" field —
        confirm against callers.
    control_coordinate : np.ndarray
        Coordinate names associated with control degrees of freedom
    excitation_coordinate : np.ndarray, optional
        Coordinate names associated with excitation degrees of freedom.
        Defaults to a copy of ``control_coordinate``.

    Returns
    -------
    np.ndarray
        An array sorted by the provided coordinate strings, with signs
        flipped where the requested direction is opposite the stored one

    Raises
    ------
    ValueError
        If requested coordinate strings do not exist in the array strings
    """
    if excitation_coordinate is None:
        excitation_coordinate = control_coordinate.copy()
    # transforming control_coordinate from array of shape (N,) to (N, N, 2)
    # (equivalent to SDynPy outer_product)
    if array.ndim == 3:
        # 3D arrays (e.g. CPSD/FRF matrices) need a row/column coordinate pair
        control_coordinate = np.array(np.meshgrid(control_coordinate, excitation_coordinate)).T
    elif array.ndim == 2:
        # 2D arrays pair each coordinate with itself: shape (N,) -> (N, 2)
        control_coordinate = np.tile(control_coordinate, (2, 1)).T
    output_shape = control_coordinate.shape[:-1]
    # Move the ordinate axis last so entries can be indexed by coordinate
    ordinate = np.moveaxis(array, 0, -1)
    flat_array = ordinate.reshape(-1, ordinate.shape[-1])
    flat_coord = coordinate.flatten().reshape(-1, 2)
    index_array = np.empty(output_shape, dtype=int)
    # Compare coordinates sign-insensitively; signs are reconciled below
    positive_coordinates = flat_coord.copy()
    positive_coordinates["direction"] = abs(flat_coord["direction"])
    positive_control_coordinates = control_coordinate.copy()
    positive_control_coordinates["direction"] = abs(control_coordinate["direction"])
    for index in np.ndindex(output_shape):
        positive_key = positive_control_coordinates[index]
        try:
            # First match wins; IndexError here means the coordinate is absent
            index_array[index] = np.where(np.all(positive_coordinates == positive_key, axis=-1))[0][
                0
            ]
        except IndexError as exc:
            raise ValueError(
                f"Coordinate {str(control_coordinate[index])} not found in data array"
            ) from exc
    return_array = flat_array[index_array]
    return_coord = flat_coord[index_array]

    # +1 where requested and stored directions agree, -1 where they differ
    ordinate_multiplication_array = np.prod(
        np.sign(return_coord["direction"]) * np.sign(control_coordinate["direction"]),
        axis=-1,
    )
    # Set up for broadcasting
    ordinate_multiplication_array = ordinate_multiplication_array[..., np.newaxis]
    # Remove zeros and replace with 1s because we don't flip signs if
    # there is no direction associated with the coordinate
    ordinate_multiplication_array[ordinate_multiplication_array == 0] = 1
    return_array *= ordinate_multiplication_array
    # Restore the ordinate axis to the front
    return np.moveaxis(return_array, -1, 0)

723 

724 

def flush_queue(queue, timeout=None):
    """Flushes a queue by getting all the data currently in it.

    Parameters
    ----------
    queue : mp.queues.Queue or VerboseMessageQueue
        The queue to flush.
    timeout : float, optional
        If given, each get blocks up to this many seconds before the queue
        is considered empty; otherwise gets are non-blocking.

    Returns
    -------
    data : list
        A list of all data that were in the queue at flush.
    """
    # Non-blocking gets when no timeout is requested.
    block = timeout is not None
    data = []
    while True:
        try:
            if isinstance(queue, VerboseMessageQueue):
                # VerboseMessageQueue.get requires a task name for logging.
                item = queue.get("Flush", block=block, timeout=timeout)
            else:
                item = queue.get(block=block, timeout=timeout)
        except mp.queues.Empty:
            return data
        data.append(item)

755 

756 

def db2scale(decibel):
    """Convert a decibel value to a linear scale factor.

    Parameters
    ----------
    decibel : float
        Value in decibels.

    Returns
    -------
    scale : float
        Equivalent value in linear units.
    """
    exponent = decibel / 20
    return 10**exponent

773 

774 

def power2db(power):
    """Convert a power quantity to decibels (10*log10)."""
    log_power = np.log10(power)
    return 10 * log_power

778 

779 

def scale2db(scale):
    """Convert a linear scale quantity to decibels (20*log10)."""
    log_scale = np.log10(scale)
    return 20 * log_scale

783 

784 

def rms_time(signal, axis=None, keepdims=False):
    """Compute the root-mean-square of a time signal.

    Parameters
    ----------
    signal : np.ndarray
        Signal over which to compute the root-mean-square value.
    axis : int, optional
        The dimension over which the mean is performed (Default value = None,
        meaning over all elements).
    keepdims : bool, optional
        Whether to keep the dimension over which the mean is computed
        (Default value = False).

    Returns
    -------
    rms : numpy scalar or numpy.ndarray
        The root-mean-square value of signal.
    """
    mean_square = np.mean(signal**2, axis=axis, keepdims=keepdims)
    return np.sqrt(mean_square)

804 

805 

def rms_csd(csd, df):
    """Compute the per-channel RMS from a CPSD matrix.

    Parameters
    ----------
    csd : np.ndarray
        3D complex array where the first dimension is the frequency line and
        the last two dimensions are the rows and columns of the CPSD matrix.
    df : float
        Frequency spacing of the CPSD matrix.

    Returns
    -------
    rms : numpy.ndarray
        The root-mean-square value of each signal in the CPSD matrix.
    """
    # The autospectra live on the diagonal; integrating them over frequency
    # (sum times df) gives the mean-square value of each channel.
    autospectra = np.diagonal(csd, axis1=-2, axis2=-1)
    mean_square = np.sum(autospectra, axis=0).real * df
    return np.sqrt(mean_square)

825 

826 

def trac(th_1, th_2=None):
    """Compute the time response assurance criterion (TRAC).

    Parameters
    ----------
    th_1 : np.ndarray
        Signals to compute the TRAC on; the last axis is time.
    th_2 : np.ndarray, optional
        Signals to compare against th_1.  When omitted, the TRAC of th_1
        against itself is computed (all ones).

    Returns
    -------
    np.ndarray
        TRAC value for each signal or pair of signals.
    """
    if th_2 is None:
        th_2 = th_1
    leading_shape = th_1.shape[:-1]
    # Collapse all leading axes so each row is one time history.
    a = th_1.reshape(-1, th_1.shape[-1])
    b = th_2.reshape(-1, th_2.shape[-1])
    cross = np.sum(a * b.conj(), axis=-1)
    auto_a = np.sum(a * a.conj(), axis=-1)
    auto_b = np.sum(b * b.conj(), axis=-1)
    trac_val = np.abs(cross) ** 2 / (auto_a * auto_b)
    return trac_val.reshape(leading_shape)

853 

854 

def moving_sum(signal, n):
    """Compute a moving (sliding-window) sum along the last axis.

    Parameters
    ----------
    signal : np.ndarray
        The signal(s) to compute the moving sum on.
    n : int
        The number of items in each window.

    Returns
    -------
    np.ndarray
        The moving sum at each valid window position along the last axis.
    """
    cumulative = np.cumsum(signal, axis=-1)
    # Window sum at position k is cumulative[n-1+k] - cumulative[k-1];
    # the first window is simply cumulative[n-1].
    windowed = cumulative[..., n - 1 :].copy()
    windowed[..., 1:] -= cumulative[..., :-n]
    return windowed

873 

874 

def corr_norm_signal_spec(signal, specification):
    """Compute correlation normalized by both signal and specification norms.

    Parameters
    ----------
    signal : np.ndarray
        The signal to compute the correlation on.
    specification : np.ndarray
        The signal to compute the correlation against.

    Returns
    -------
    np.ndarray
        The weighted correlation signal.
    """
    correlation = sig.correlate(signal, specification, mode="valid").squeeze()
    spec_norm = np.linalg.norm(specification)
    # Per-lag norm of the signal over a specification-length window.
    signal_norm = np.sqrt(np.sum(moving_sum(signal**2, specification.shape[-1]), axis=0))
    # Guard against division by zero: a silent window gets a huge norm so
    # its correlation score collapses toward zero.
    signal_norm[signal_norm == 0] = 1e14
    return correlation / spec_norm / signal_norm

895 

896 

def corr_norm_spec2(signal, specification):
    """Compute correlation normalized by the squared specification norm.

    Parameters
    ----------
    signal : np.ndarray
        The signal to compute the correlation on.
    specification : np.ndarray
        The signal to compute the correlation against.

    Returns
    -------
    np.ndarray
        The weighted correlation signal.
    """
    correlation = sig.correlate(signal, specification, mode="valid").squeeze()
    spec_norm = np.linalg.norm(specification)
    return correlation / spec_norm**2

915 

916 

def norm_ratio(signal, specification):
    """Score how closely each windowed signal norm matches the specification norm.

    Parameters
    ----------
    signal : np.ndarray
        The signal to compute the score on.
    specification : np.ndarray
        The signal whose norm is used as the reference.

    Returns
    -------
    np.ndarray
        The norm ratio signal: 1 when the window energy matches the
        specification energy, falling off as the ratio departs from 1.
    """
    spec_norm = np.linalg.norm(specification)
    # Per-lag norm of the signal over a specification-length window.
    signal_norm = np.sqrt(np.sum(moving_sum(signal**2, specification.shape[-1]), axis=0))
    energy_ratio = (signal_norm / spec_norm) ** 2
    return 1 - np.abs(energy_ratio - 1)

935 

936 

def correlation_norm_spec_ratio(signal, specification):
    """Compute spec-normalized correlation penalized by energy mismatch.

    Parameters
    ----------
    signal : np.ndarray
        The signal to compute the correlation on.
    specification : np.ndarray
        The signal to compute the correlation against.

    Returns
    -------
    np.ndarray
        The weighted correlation signal.
    """
    correlation = sig.correlate(signal, specification, mode="valid").squeeze()
    spec_norm = np.linalg.norm(specification)
    # Per-lag norm of the signal over a specification-length window.
    signal_norm = np.sqrt(np.sum(moving_sum(signal**2, specification.shape[-1]), axis=0))
    # Penalty term grows as window energy departs from specification energy.
    energy_error = abs(1 - (signal_norm / spec_norm) ** 2)
    return correlation / spec_norm**2 - energy_error

956 

957 

def correlation_norm_signal_spec_ratio(signal, specification):
    """Computes correlation weighted by the ratio of the norms of the signals

    Parameters
    ----------
    signal : np.ndarray
        The signal to compute the correlation on
    specification : np.ndarray
        The signal to compute the correlation against

    Returns
    -------
    np.ndarray
        The weighted correlation signal
    """
    window_length = specification.shape[-1]
    raw_correlation = sig.correlate(signal, specification, mode="valid").squeeze()
    spec_norm = np.linalg.norm(specification)
    # Running norm of the signal over a specification-length window,
    # combined across channels (axis 0)
    signal_norm = np.sqrt(np.sum(moving_sum(signal**2, window_length), axis=0))
    # Avoid division by zero in silent regions by substituting a huge norm,
    # which drives the correlation term toward zero there
    safe_signal_norm = signal_norm.copy()
    safe_signal_norm[safe_signal_norm == 0] = 1e14
    # Penalize positions where the signal energy differs from the specification's
    energy_mismatch = abs(1 - (signal_norm / spec_norm) ** 2)
    return raw_correlation / spec_norm / safe_signal_norm - energy_mismatch

981 

982 

def align_signals(
    measurement_buffer,
    specification,
    correlation_threshold=0.9,
    perform_subsample=True,
    correlation_metric=None,
):
    """Computes the time shift between two signals in time

    Finds the sample delay at which the measurement best matches the
    specification via cross-correlation (or a user-supplied metric), and
    optionally refines the alignment to subsample accuracy using the phase
    slope of the FFT.

    Parameters
    ----------
    measurement_buffer : np.ndarray
        Signal coming from the measurement
    specification : np.ndarray
        Signal to align the measurement to
    correlation_threshold : float, optional
        Threshold for a "good" correlation, by default 0.9
    perform_subsample : bool, optional
        If True, computes a time shift that could be between samples using the phase of the FFT of
        the signals, by default True
    correlation_metric : function, optional
        An optional function to use to change the matching criterion; by default a simple
        normalized correlation is used

    Returns
    -------
    spec_portion_aligned : np.ndarray or None
        The portion of the measurement that lines up with the specification.
        All four return values are None when the best correlation found is
        below ``correlation_threshold``.
    delay : int or None
        The sample delay between the measurement and specification
    mean_phase_slope : float or None
        The slope of the phase computed in the FFT from the subsample alignment. Will be None
        if subsample matching is not used
    found_correlation : float or None
        The value of the correlation metric used to find the match
    """
    if correlation_metric is None:
        # Normalize so that a perfect match of the specification against
        # itself yields a correlation of 1.0
        maximum_possible_correlation = np.sum(specification**2)
        correlation = (
            sig.correlate(measurement_buffer, specification, mode="valid").squeeze()
            / maximum_possible_correlation
        )
    else:
        correlation = correlation_metric(measurement_buffer, specification)
    # The best-matching lag is wherever the correlation metric peaks
    delay = np.argmax(correlation)
    found_correlation = correlation[delay]
    print(f"Max Correlation: {found_correlation}")
    if found_correlation < correlation_threshold:
        # No acceptable match found; signal failure to the caller
        return None, None, None, None
    # Extract the window of the measurement that best overlaps the specification.
    # NOTE(review): this indexing assumes measurement_buffer is 2D with time on
    # the last axis (channels x samples) -- confirm against callers.
    specification_portion = measurement_buffer[:, delay : delay + specification.shape[-1]]

    if perform_subsample:
        # Compute ffts for subsample alignment
        spec_fft = np.fft.rfft(specification, axis=-1)
        spec_portion_fft = np.fft.rfft(specification_portion, axis=-1)

        # A constant time shift appears as a phase ramp that is linear in
        # frequency; estimate its slope from the per-line phase difference
        # (DC and Nyquist lines are excluded by the 1:-1 slices)
        phase_difference = np.angle(spec_portion_fft / spec_fft)
        phase_slope = phase_difference[..., 1:-1] / np.arange(phase_difference.shape[-1])[1:-1]
        mean_phase_slope = np.median(
            phase_slope
        )  # Use Median to discard outliers due to potentially noisy phase

        # Remove the subsample shift by unwinding the phase ramp in the
        # frequency domain, then transform back to time
        spec_portion_aligned_fft = spec_portion_fft * np.exp(
            -1j * mean_phase_slope * np.arange(spec_portion_fft.shape[-1])
        )
        spec_portion_aligned = np.fft.irfft(spec_portion_aligned_fft)
    else:
        spec_portion_aligned = specification_portion.copy()
        mean_phase_slope = None
    return spec_portion_aligned, delay, mean_phase_slope, found_correlation

1057 

1058 

def shift_signal(signal, samples_to_keep, sample_delay, phase_slope):
    """Applies a time shift to a signal by modifying the phase of the FFT

    Parameters
    ----------
    signal : np.ndarray
        The signal to shift
    samples_to_keep : int
        The number of samples to keep in the shifted signal
    sample_delay : int
        The number of samples to delay
    phase_slope : float
        The slope of the phase if subsample shift is used

    Returns
    -------
    np.ndarray
        The shifted signal
    """
    # Whole-sample portion of the shift: slice out the window of interest
    kept_samples = signal[..., sample_delay : sample_delay + samples_to_keep]
    # Subsample portion: apply a linear phase ramp in the frequency domain
    kept_fft = np.fft.rfft(kept_samples, axis=-1)
    frequency_indices = np.arange(kept_fft.shape[-1])
    phase_ramp = np.exp(-1j * phase_slope * frequency_indices)
    return np.fft.irfft(kept_fft * phase_ramp)

1084 

1085 

def wrap(data, period=2 * np.pi):
    """Wraps data into the half-open interval [-period/2, period/2)

    The original docstring claimed wrapping between -pi/2 and pi/2; the
    formula actually wraps into [-period/2, period/2), which for the default
    period of ``2 * np.pi`` is [-pi, pi).

    Parameters
    ----------
    data : np.ndarray or float
        Values (e.g. phase angles) to wrap
    period : float, optional
        The wrapping period, by default ``2 * np.pi``

    Returns
    -------
    np.ndarray or float
        The wrapped values
    """
    return (data + period / 2) % period - period / 2

1089 

1090 

class GlobalCommands(Enum):
    """An enumeration that lists the commands that the controller can accept

    All values are negative integers.  NOTE(review): presumably this keeps
    them distinct from environment-specific command enumerations -- confirm
    against the environment command definitions elsewhere in the package.
    """

    # Controller lifecycle and setup
    QUIT = -1
    INITIALIZE_DATA_ACQUISITION = -2
    INITIALIZE_ENVIRONMENT_PARAMETERS = -3
    # Hardware start/stop
    RUN_HARDWARE = -4
    STOP_HARDWARE = -5
    # Streaming setup/teardown and data transfer
    INITIALIZE_STREAMING = -6
    STREAMING_DATA = -7
    FINALIZE_STREAMING = -8
    # Environment and streaming start/stop
    START_ENVIRONMENT = -9
    STOP_ENVIRONMENT = -10
    START_STREAMING = -11
    STOP_STREAMING = -12
    CREATE_NEW_STREAM = -13
    # Status and metadata notifications
    COMPLETED_SYSTEM_ID = -14
    AT_TARGET_LEVEL = -15
    UPDATE_METADATA = -16
    # Interactive control
    UPDATE_INTERACTIVE_CONTROL_PARAMETERS = -17
    SEND_INTERACTIVE_COMMAND = -18

1112 

1113 

class OverlapBuffer:
    """Class to hold a buffer stored in a numpy array.

    New data is appended at the newest end of the buffer axis, shifting the
    oldest samples out.  This buffer supports overlap; when you pull data
    out, it doesn't remove the data from the buffer.  A separate buffer
    position tracks how many of the newest samples have not yet been
    consumed."""

    def __init__(self, shape, buffer_axis=-1, starting_value=0, dtype="float64"):
        """
        Creates a buffer object

        Parameters
        ----------
        shape : tuple
            Shape of the underlying data array
        buffer_axis : int, optional
            Index corresponding to the buffer axis. The default is -1.
        starting_value : optional
            Initial value of the array. Can be any value or array that can be
            broadcast into the shape of the array. The default is 0.
        dtype : numpy dtype, optional
            The data type of the buffer array. The default is 'float64'.
        """
        self._buffer_data = np.empty(shape, dtype)
        self._buffer_data[:] = starting_value
        # Normalize to a positive axis index so the slice construction in
        # _axis_slice works uniformly
        self._buffer_axis = buffer_axis % self._buffer_data.ndim
        self._buffer_position = 0

    @property
    def buffer_position(self):
        """The current buffer position (number of unconsumed samples)"""
        return self._buffer_position

    @property
    def buffer_axis(self):
        """The axis of the data that is used as buffer dimension"""
        return self._buffer_axis

    @property
    def buffer_data(self):
        """Gets the data currently on the buffer"""
        return self._buffer_data

    def _axis_slice(self, axis_slice):
        """Builds a full-array index applying ``axis_slice`` on the buffer axis only."""
        return tuple(
            axis_slice if i == self.buffer_axis else slice(None)
            for i in range(self.buffer_data.ndim)
        )

    def add_data_noshift(self, data):
        """Adds data to the buffer without shifting the buffer position

        Parameters
        ----------
        data : array-like
            New samples.  If more samples are supplied than fit in the
            buffer, only the newest ones are kept.
        """
        data = np.asarray(data)
        # Make sure the data will fit into the buffer (keep the newest samples)
        buffer_length = self.buffer_data.shape[self.buffer_axis]
        data = data[self._axis_slice(slice(-buffer_length, None))]
        # Shift out the oldest samples and append the new data at the end
        new_data_size = data.shape[self.buffer_axis]
        retained = self.buffer_data[self._axis_slice(slice(new_data_size, None))]
        self.buffer_data[:] = np.concatenate((retained, data), axis=self.buffer_axis)

    def add_data(self, data):
        """Adds data to the buffer and shifts the buffer position"""
        # Bug fix: convert array-likes up front.  The original accessed
        # data.shape on the raw argument, which raised AttributeError for
        # plain lists even though add_data_noshift accepted them.
        data = np.asarray(data)
        self.add_data_noshift(data)
        # Advance the position by the new samples, clamped to the buffer length
        self.shift_buffer_position(data.shape[self.buffer_axis])

    def get_data_noshift(self, num_samples):
        """Gets data from the buffer without shifting the buffer position

        Raises
        ------
        ValueError
            If more samples are requested than the current buffer position.
        """
        data_start = -self.buffer_position
        data_end = -self.buffer_position + num_samples
        if data_end > 0:
            raise ValueError(
                f"Too many samples requested {num_samples} > "
                f"buffer position of {self.buffer_position}"
            )
        # data_end == 0 means "through the end of the buffer"; a literal -0
        # slice bound would return an empty array
        return self.buffer_data[
            self._axis_slice(slice(data_start, None if data_end == 0 else data_end))
        ]

    def get_data(self, num_samples, buffer_shift=None):
        """Gets data from the buffer and updates the position

        By default the read samples are consumed (position decreases by
        ``num_samples``); pass a different ``buffer_shift`` to produce
        overlapping reads.
        """
        data = self.get_data_noshift(num_samples)
        self.shift_buffer_position(-num_samples if buffer_shift is None else buffer_shift)
        return data

    def shift_buffer_position(self, samples):
        """Moves the buffer position, clamped to the valid range"""
        self.set_buffer_position(self._buffer_position + samples)

    def set_buffer_position(self, position=0):
        """Sets the buffer position, clamped between 0 and the buffer length"""
        max_position = self.buffer_data.shape[self.buffer_axis]
        self._buffer_position = min(max(position, 0), max_position)

    def __getitem__(self, key):
        return self.buffer_data[key]

    @property
    def shape(self):
        """Gets the shape of the buffer"""
        return self.buffer_data.shape

1243 

1244 

def load_python_module(module_path):
    """Loads in the Python file at the specified path as a module at runtime

    Parameters
    ----------
    module_path : str
        Path to the module to be loaded

    Returns
    -------
    module : module
        A reference to the loaded and executed module
    """
    # The module's name is the file name without directory or extension
    module_name = os.path.splitext(os.path.basename(module_path))[0]
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    module = importlib.util.module_from_spec(spec)
    # Execute the module body so its definitions are populated
    spec.loader.exec_module(module)
    return module