Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
33 changes: 16 additions & 17 deletions openpiv/pyprocess.py
Original file line number Diff line number Diff line change
Expand Up @@ -722,7 +722,7 @@ def fft_correlate_images(
# longer exposure for frame B
# image_a = match_histograms(image_a, image_b)

# remove mean background, normalize to 0..1 range
# remove mean, divide by standard deviation
image_a = normalize_intensity(image_a)
image_b = normalize_intensity(image_b)

Expand All @@ -747,15 +747,18 @@ def fft_correlate_images(
print(f"correlation method {correlation_method } is not implemented")

if normalized_correlation:
corr = corr/(s2[0]*s2[1]) # for extended search area
corr = np.clip(corr, 0, 1)
corr = corr/(corr.shape[-2]*corr.shape[-1]) # for extended search area

return corr


def normalize_intensity(window):
"""Normalize interrogation window or strided image of many windows,
Comment on lines 755 to 756
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nitpick (typo): Fix typos and phrasing in the normalize_intensity docstring for clarity.

Use "standard deviation" and "might not be fully converged" here to improve clarity of the docstring.

Suggested change
def normalize_intensity(window):
"""Normalize interrogation window or strided image of many windows,
def normalize_intensity(window):
"""Normalize an interrogation window or a strided image of many windows
by removing the mean intensity per window and dividing by the
standard deviation.
Note: for small signals the standard deviation might not be fully
converged. NumPy also recommends using float64 for improved
numerical accuracy:
https://numpy.org/doc/stable/reference/generated/numpy.std.html

by removing the mean intensity value per window and clipping the
negative values to zero
by removing the mean intensity value per window and dividing by
the standard deviation. Note: for small signals the standard deviation
might not be fully converged. Also the numpy docs recommend float64 for
better accuracy:
https://numpy.org/doc/stable/reference/generated/numpy.std.html

Parameters
----------
Expand All @@ -765,22 +768,20 @@ def normalize_intensity(window):
Returns
-------
window : 2d np.ndarray
the interrogation window array, with mean value equal to zero and
intensity normalized to -1 +1 and clipped if some pixels are
extra low/high
the interrogation window array, with zero mean and variance 1
"""
# Convert to float32 only if needed, otherwise work in-place
if window.dtype != np.float32:
window = window.astype(np.float32)
# Convert to float64 only if needed, otherwise work in-place
if window.dtype != np.float64:
window = window.astype(np.float64)
else:
window = window.copy() # Still need a copy to avoid modifying input

window -= window.mean(axis=(-2, -1),
keepdims=True, dtype=np.float32)
keepdims=True, dtype=np.float64)
tmp = window.std(axis=(-2, -1), keepdims=True)
window = np.divide(window, tmp, out=np.zeros_like(window),
where=(tmp != 0))
return np.clip(window, 0, window.max())
return window


def correlate_windows(window_a, window_b, correlation_method="fft",
Expand Down Expand Up @@ -825,9 +826,7 @@ def correlate_windows(window_a, window_b, correlation_method="fft",
It leads to inconsistency of the output
"""

# first we remove the mean to normalize contrast and intensity
# the background level which is take as a mean of the image
# is subtracted
# remove mean, divide by standard deviation
# import pdb; pdb.set_trace()
window_a = normalize_intensity(window_a)
window_b = normalize_intensity(window_b)
Expand All @@ -849,7 +848,7 @@ def correlate_windows(window_a, window_b, correlation_method="fft",
else:
print(f"correlation method {correlation_method } is not implemented")

return corr
return corr/(corr.shape[-2]*corr.shape[-1])
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

issue (bug_risk): Consider whether correlate_windows should always normalize by window size or offer a flag for raw vs. normalized correlation.

This change always scales by window area, unlike fft_correlate_images, where normalization is controlled by normalized_correlation. If callers currently rely on raw correlation magnitudes, this becomes a backward-incompatible change and may cause subtle issues (e.g., double-normalization or misread peak heights). Consider adding a normalized_correlation parameter here too, or verifying that all call sites expect normalized output.



def fft_correlate_windows(window_a, window_b,
Expand Down
8 changes: 4 additions & 4 deletions openpiv/test/test_performance.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,14 +33,14 @@ def test_find_all_first_peaks_performance():

def test_normalize_intensity_performance():
"""Test that normalize_intensity avoids unnecessary conversions."""
# Test with float32 input (should not convert)
window_float = np.random.rand(50, 64, 64).astype(np.float32)
# Test with float64 input (should not convert)
window_float = np.random.rand(50, 64, 64).astype(np.float64)

start = time.time()
result = pyprocess.normalize_intensity(window_float)
elapsed_float = time.time() - start

assert result.dtype == np.float32
assert result.dtype == np.float64
Comment on lines 34 to +43
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

suggestion (testing): Add functional tests for normalize_intensity statistical properties and zero-variance windows

This test now only covers dtype and timing, but the function’s new guarantees (zero mean, unit variance, and zero-std handling) aren’t verified. Please add a separate, non-performance test (e.g. in test_pyprocess.py or a dedicated test_normalize_intensity.py) that:

  • Checks that each output window from random input has mean ≈ 0 and std ≈ 1 within a reasonable tolerance.
  • Verifies that a constant-intensity window (std == 0) produces all zeros, matching the np.divide(..., where=(tmp != 0)) behavior.
  • Optionally confirms that integer inputs (e.g. uint8) behave consistently with float inputs.

This will ensure the new normalization behavior is properly covered by tests.


# Test with uint8 input (needs conversion)
window_uint = (np.random.rand(50, 64, 64) * 255).astype(np.uint8)
Expand All @@ -49,7 +49,7 @@ def test_normalize_intensity_performance():
result = pyprocess.normalize_intensity(window_uint)
elapsed_uint = time.time() - start

assert result.dtype == np.float32
assert result.dtype == np.float64

# Should be reasonably fast (< 50ms for 50 windows)
assert elapsed_float < 0.05, f"normalize_intensity (float64) took {elapsed_float:.4f}s"
Expand Down
4 changes: 2 additions & 2 deletions openpiv/test/test_process.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,8 +166,8 @@ def test_sig2noise_ratio():
subpixel_method="gaussian"
)
# print(s2n.flatten().min(),s2n.mean(),s2n.max())
assert np.allclose(s2n.mean(), 1.422, rtol=1e-3)
assert np.allclose(s2n.max(), 2.264, rtol=1e-3)
assert np.allclose(s2n.mean(), 2.564, rtol=1e-3)
assert np.allclose(s2n.max(), 4.119, rtol=1e-3)


def test_fft_correlate():
Expand Down
3 changes: 2 additions & 1 deletion openpiv/test/test_pyprocess.py
Original file line number Diff line number Diff line change
Expand Up @@ -290,7 +290,8 @@ def test_fft_correlate_images():
assert corr_norm.shape[0] == 3 # Should have same batch size

# Normalized correlation should have values between -1 and 1
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

suggestion (testing): Add targeted tests for correlate_windows normalization and scaling by correlation field size

The new behavior normalizes correlate_windows by corr.shape[-2] * corr.shape[-1], and fft_correlate_images now relies on normalize_intensity with zero-mean/unit-variance. The current tests only cover this indirectly.

Please add a focused test for correlate_windows that:

  • Correlates identical windows of different sizes and asserts that the peak normalized correlation is comparable (i.e., does not scale with window area).
  • Optionally verifies any flag that enables/disables normalization.

This will directly validate the new normalization and scaling behavior across window sizes.

assert np.all(corr_norm <= 1.0 + 1e-10) # Allow small floating point errors
assert np.all(corr_norm <= 1.5)  # Allow a rather large error because the std is not
# fully converged on the small test window

# Test with invalid correlation method - the function prints an error but doesn't raise an exception
# Instead, it returns None for the 'corr' variable, which causes an error later
Expand Down