1"""
2This module contains functions related to the `downscale` command line argument
3with a provided input file.
5Functions
6---------
7padded_size(img_stack_dim_size, target_res, original_res, tolerance, limit_factor)
8 Determine the expected pad size along a dimension.
10pad_amount(img_stack_dim_size, target_res, original_res, tolerance, limit_factor)
11 Determine the amount of padding to add along a dimension.
13apply_bbox(image_stack, threshold)
14 Crop the image stack to the smallest bounding box over the threshold.
16bbox_range(image_stack, threshold)
17 Calculate the bounding box range in all dimensions.
19save_downscale_stack(image_stack, path, folder_suffix)
20 Save the new stack as a tiff image stack.
22downscale(path_file_input)
23 Downscale the image stack based on the provided input file.
25main()
26 Runs the module from the command line, invoked from pyproject.toml with 'downscale' command.
27"""
import argparse
import itertools
import math
from pathlib import Path
from typing import Tuple

import numpy as np
from scipy import ndimage
from pyevtk.hl import gridToVTK

from recon3d.types import CartesianAxis3D
import recon3d.utility as ut
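

# Illustrative sketch (not part of the original module): the downscale() function
# below is driven by a YAML input file that ut.yaml_to_dict loads into a plain
# dictionary. The structure below is inferred only from the keys this module reads;
# the paths and numeric values are hypothetical placeholders, and real recon3d
# input files may contain additional fields.
_EXAMPLE_INPUT_SKETCH = {
    "image_dir": "~/data/segmented_images",  # hypothetical path
    "image_type": ".tif",  # hypothetical value
    "out_dir": "~/data/downscaled_output",  # hypothetical path
    "resolution_input": {"dz": 1.0, "dy": 1.0, "dx": 1.0},
    "resolution_output": {"dz": 2.0, "dy": 2.0, "dx": 2.0},
    "downscale_tolerance": 0.01,
    "image_limit_factor": 2.0,
    "output_stack_type": "downscaled",  # or "bounding_box" or "padded"
    "padding": {"nz": 2, "ny": 2, "nx": 2},  # read only for "padded"
    "save_npy": False,
    "writeVTR": False,
}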


def apply_bbox(image_stack: np.ndarray, min_threshold: float) -> np.ndarray:
    """
    Crop the image stack to the smallest bounding box over the threshold.

    Parameters
    ----------
    image_stack : np.ndarray
        The stack of images as a data cube.
    min_threshold : float
        The minimum threshold value for cropping (values greater than this threshold will be retained).

    Returns
    -------
    np.ndarray
        The cropped image stack.

    Examples
    --------
    >>> image_stack = np.random.rand(10, 10, 10)
    >>> cropped_stack = apply_bbox(image_stack, 0.5)
    Cropping image stack to bounding box...
    """
    print("Cropping image stack to bounding box...")

    (z_start, z_end, y_start, y_end, x_start, x_end) = bbox_range(
        image_stack, min_threshold
    )

    # Slice the image stack to the bbox array
    # Add one to include the full range of the bounding box because of the slicing convention
    image_stack = image_stack[
        z_start : z_end + 1,
        y_start : y_end + 1,
        x_start : x_end + 1,
    ]
    return image_stack


def bbox_range(
    image_stack: np.ndarray, min_threshold: float
) -> Tuple[int, int, int, int, int, int]:
    """
    Calculate the bounding box range in all dimensions.

    Parameters
    ----------
    image_stack : np.ndarray
        The stack of images as a data cube.
    min_threshold : float
        The minimum threshold value for calculating the bounding box (values greater than this threshold will be retained).

    Returns
    -------
    tuple[int, int, int, int, int, int]
        The bounding box range in all dimensions.

    Examples
    --------
    >>> image_stack = np.ones((100, 100, 100))
    >>> bbox_range(image_stack, 0.5)
    (0, 99, 0, 99, 0, 99)
    """
    image_stack = image_stack > min_threshold
    # Determine the bbox range, see link
    # https://stackoverflow.com/questions/31400769/bounding-box-of-numpy-array

    # TODO: check this function with RGB image stack as input
    # N = image_stack.ndim
    N = 3
    bbox = []
    # Each `ax` is the pair of axes to collapse, leaving one axis whose first and
    # last nonzero indices give that dimension's bounds
    for ax in itertools.combinations(reversed(range(N)), N - 1):
        nonzero = np.any(image_stack, axis=ax)
        bbox.extend(np.where(nonzero)[0][[0, -1]])
    (z_start, z_end, y_start, y_end, x_start, x_end) = tuple(bbox)

    return (z_start, z_end, y_start, y_end, x_start, x_end)
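

# Illustrative sketch (not part of the original module): a small, self-contained
# demonstration of how bbox_range and apply_bbox behave on a synthetic volume.
# The array shape and threshold below are arbitrary choices.
def _demo_bbox():  # hypothetical helper, defined only for illustration
    """Place a block of ones inside a larger volume of zeros and confirm that
    the bounding-box helpers recover it."""
    volume = np.zeros((10, 10, 10))
    volume[2:5, 3:7, 4:6] = 1.0  # foreground block, zyx ordering
    bounds = bbox_range(volume, 0.5)
    print(bounds)  # bounds: z 2..4, y 3..6, x 4..5 (inclusive)
    print(apply_bbox(volume, 0.5).shape)  # expected shape: (3, 4, 2)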


def downscale(path_file_input: str) -> bool:
    """
    Downscale the image stack based on the provided input file.

    This function reads the input file, processes the image stack, and saves the downscaled stack.

    Parameters
    ----------
    path_file_input : str
        The path to the input file.

    Returns
    -------
    bool
        True if the processing is successful, False otherwise.
    """
    processed = False

    print(f"Processing file: {path_file_input}")

    db = ut.yaml_to_dict(Path(path_file_input))

    output_stack_type = db["output_stack_type"]
    if not (
        (output_stack_type == "downscaled")
        or (output_stack_type == "bounding_box")
        or (output_stack_type == "padded")
    ):
        raise ValueError(
            f'Invalid "output_stack_type" of "{output_stack_type}" input. Valid options include "downscaled", "bounding_box", or "padded".'
        )

    segmented_stack = ut.read_images(
        Path(db["image_dir"]).expanduser(), db["image_type"]
    )
    # zyx dimension ordering
    print(f"Original array size: {segmented_stack.shape}")

    dim_list = ["dz", "dy", "dx"]

    z_pad, y_pad, x_pad = [
        pad_amount(
            segmented_stack.shape[dim],
            db["resolution_output"][dim_list[dim]],
            db["resolution_input"][dim_list[dim]],
            db["downscale_tolerance"],
            db["image_limit_factor"],
        )
        for dim in range(0, 3)  # TODO should we restrict the dimensions to 3?
    ]

    padded_stack = np.pad(segmented_stack, (z_pad, y_pad, x_pad))
    print(f"New array size: {padded_stack.shape}")

    # Use list comprehension
    z_ratio, y_ratio, x_ratio = [
        db["resolution_input"][dim_list[dim]] / db["resolution_output"][dim_list[dim]]
        for dim in range(0, 3)  # TODO should we restrict the dimensions to 3
    ]

    downscaled_stack = ndimage.zoom(
        padded_stack,
        (z_ratio, y_ratio, x_ratio),
        order=0,  # nearest-neighbor interpolation preserves segmented label values
        mode="grid-constant",
        grid_mode=True,
    )

    print(f"Downscaled array size: {downscaled_stack.shape}")

    output_stack_type = db["output_stack_type"]
    if output_stack_type == "downscaled":
        output_stack = downscaled_stack

    else:
        cropped_stack = apply_bbox(downscaled_stack, 0)

        if output_stack_type == "bounding_box":
            output_stack = cropped_stack

        else:  # output_stack_type == "padded"
            # Pad after cropping
            (box_pad_z, box_pad_y, box_pad_x) = (
                db["padding"]["nz"],
                db["padding"]["ny"],
                db["padding"]["nx"],
            )
            padded_stack = np.pad(
                cropped_stack, ((box_pad_z,), (box_pad_y,), (box_pad_x,))
            )
            output_stack = padded_stack

    # Output the new downscaled stack
    out_dir = db["out_dir"]
    folder_suffix = f"{int(db['resolution_output']['dx'])}_dx"
    save_downscale_stack(output_stack, Path(out_dir), folder_suffix)

    # TODO: functionalize and test
    if db["save_npy"]:
        # npy_path = Path(out_dir).joinpath(f"{folder_suffix}.npy")
        npy_path = Path(out_dir).expanduser().joinpath(f"{folder_suffix}.npy")
        np.save(str(npy_path), output_stack)
        print(".npy file saved.")

    # TODO: functionalize and test
    if db["writeVTR"]:
        data = output_stack
        nx, ny, nz = data.shape[0], data.shape[1], data.shape[2]

        x = np.arange(0, nx + 1)
        y = np.arange(0, ny + 1)
        z = np.arange(0, nz + 1)

        # vtk_path = Path(out_dir).joinpath(f"{folder_suffix}")
        vtk_path = Path(out_dir).expanduser().joinpath(f"{folder_suffix}")
        gridToVTK(str(vtk_path), x, y, z, cellData={"imagedata": data})
        print(".vtr file saved.")

    processed = True  # overwrite the initial False; reaching this point means all steps succeeded

    print(f"Finished processing file: {path_file_input}")

    return processed
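

# Illustrative sketch (not part of the original module): reproduce the core
# pad-then-zoom steps of downscale() on a synthetic stack, with made-up
# resolutions and no YAML input file or disk I/O involved.
def _demo_downscale_core():  # hypothetical helper, defined only for illustration
    """Pad a synthetic zyx stack so it divides evenly, then zoom it down."""
    segmented_stack = np.ones((11, 20, 20), dtype=np.uint8)  # zyx ordering
    resolution_input = {"dz": 1.0, "dy": 1.0, "dx": 1.0}  # hypothetical values
    resolution_output = {"dz": 2.0, "dy": 2.0, "dx": 2.0}  # hypothetical values
    tolerance, limit_factor = 0.01, 2.0

    pads = [
        pad_amount(
            segmented_stack.shape[dim],
            resolution_output[key],
            resolution_input[key],
            tolerance,
            limit_factor,
        )
        for dim, key in enumerate(("dz", "dy", "dx"))
    ]
    padded = np.pad(segmented_stack, pads)
    ratios = [
        resolution_input[key] / resolution_output[key] for key in ("dz", "dy", "dx")
    ]
    downscaled = ndimage.zoom(
        padded, ratios, order=0, mode="grid-constant", grid_mode=True
    )
    print(segmented_stack.shape, "->", padded.shape, "->", downscaled.shape)
    # expected: (11, 20, 20) -> (12, 20, 20) -> (6, 10, 10)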


def pad_amount(
    img_stack_dim_size: int,
    target_res: float,
    original_res: float,
    tolerance: float,
    limit_factor: float,
) -> tuple[int, int]:
    """
    Determine the amount of padding to add along a dimension.

    If the amount of padding is odd, the larger value will be padded to the front of the dimension.

    Parameters
    ----------
    img_stack_dim_size : int
        The size of the image stack along a dimension.
    target_res : float
        The target resolution.
    original_res : float
        The original resolution.
    tolerance : float
        The tolerance for the padding calculation.
    limit_factor : float
        The limit factor for the padding calculation.

    Returns
    -------
    tuple[int, int]
        The amount of padding to add to the front and back of the dimension.

    Examples
    --------
    >>> pad_amount(100, 0.62, 1.0, 0.01, 2.0)
    (12, 12)
    """
    the_padded_size = padded_size(
        img_stack_dim_size,
        target_res,
        original_res,
        tolerance,
        limit_factor,
    )
    total_pad = the_padded_size - img_stack_dim_size
    pad = (math.ceil(total_pad / 2), math.floor(total_pad / 2))

    return pad
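

# Illustrative sketch (not part of the original module): show how an odd total
# pad is split, with the extra voxel going to the front of the dimension. The
# sizes and resolutions below are arbitrary choices.
def _demo_pad_amount():  # hypothetical helper, defined only for illustration
    """A dimension of 11 voxels needs 1 extra voxel to reach 12, which divides
    evenly by a downscale factor of 0.4; the single pad voxel goes to the front."""
    print(pad_amount(11, 0.4, 1.0, 0.01, 2.0))  # expected: (1, 0)
    print(pad_amount(100, 0.62, 1.0, 0.01, 2.0))  # expected: (12, 12), as in the docstring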


def padded_size(
    img_stack_dim_size: int,
    target_res: float,
    original_res: float,
    tolerance: float,
    limit_factor: float,
) -> int:
    """
    Determine the expected pad size along a dimension.

    Parameters
    ----------
    img_stack_dim_size : int
        The size of the image stack along a dimension.
    target_res : float
        The target resolution.
    original_res : float
        The original resolution.
    tolerance : float
        The tolerance for the padding calculation.
    limit_factor : float
        The limit factor for the padding calculation.

    Returns
    -------
    int
        The new dimension size after padding.

    Examples
    --------
    >>> padded_size(100, 0.5, 1.0, 0.01, 2.0)
    100
    """
    downscale_factor = float(target_res) / float(original_res)

    # Start one below the current dimension size so the first candidate tested is the original size
    new_dim = img_stack_dim_size - 1

    _too_high, _too_low = True, True

    _b = 0.0  # the value the remainder is compared against

    while _too_high and _too_low:
        if new_dim > (img_stack_dim_size * int(limit_factor)):
            raise ValueError(
                f'Could not find even padding within "{tolerance}" for a {limit_factor}x image size. Increase the tolerance or choose a different "target_res".'
            )
        new_dim += 1

        # To avoid floating-point error in the modulo (new_dim % downscale_factor),
        # compute the remainder manually from both the ceiling and floor roundings
        _a_high = new_dim - (math.ceil(new_dim / downscale_factor) * downscale_factor)
        _a_low = new_dim - (math.floor(new_dim / downscale_factor) * downscale_factor)

        _too_high = not math.isclose(
            _a_high,
            _b,
            abs_tol=tolerance,
        )
        _too_low = not math.isclose(
            _a_low,
            _b,
            abs_tol=tolerance,
        )

    return new_dim
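

# Illustrative sketch (not part of the original module): trace the search that
# padded_size performs. With a target resolution of 0.62 and an original
# resolution of 1.0, no size between 100 and 123 falls within 0.01 of an
# integer multiple of 0.62, but 124 = 200 * 0.62 is exact, so the dimension is
# padded from 100 up to 124. The inputs below are arbitrary choices.
def _demo_padded_size():  # hypothetical helper, defined only for illustration
    print(padded_size(100, 0.5, 1.0, 0.01, 2.0))  # expected: 100 (already divides evenly)
    print(padded_size(100, 0.62, 1.0, 0.01, 2.0))  # expected: 124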


def save_downscale_stack(
    image_stack: np.ndarray, path: Path, folder_suffix: str
) -> bool:
    """
    Save the new stack as a tiff image stack.

    Parameters
    ----------
    image_stack : np.ndarray
        The stack of images as a data cube.
    path : Path
        The save path.
    folder_suffix : str
        The resolution in the suffix of the save folder "images_at_resolution_{RES_dx}".

    Returns
    -------
    bool
        True for success, False otherwise.

    Examples
    --------
    >>> image_stack = np.random.rand(100, 100, 100)
    >>> save_downscale_stack(image_stack, Path("/path/to/save"), "0.5_dx")
    Saving cropped image stack in /path/to/save > images_at_resolution_0.5_dx
    True
    """
    image_folder_name = f"images_at_resolution_{folder_suffix}"
    print(f"Saving cropped image stack in {path} > {image_folder_name}")
    ut.ndarray_to_img(
        data=image_stack,
        slice_axis=CartesianAxis3D.Z,
        parent_dir=path,
        folder_name=image_folder_name,
    )

    return True


def main():
    """
    Runs the module from the command line, invoked from pyproject.toml with the 'downscale' command.

    This function sets up the command line argument parser, parses the input arguments,
    and calls the `downscale` function with the provided input file.

    Parameters
    ----------
    None

    Returns
    -------
    None

    Examples
    --------
    To run this module from the command line with the 'downscale' command:

        $ downscale input_file.yml
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("input_file", help="the .yml user input file")
    args = parser.parse_args()
    input_file = args.input_file

    downscale(path_file_input=input_file)


if __name__ == "__main__":
    main()