Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 39 additions & 4 deletions src/easyscience/fitting/minimizers/minimizer_bumps.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@
# SPDX-License-Identifier: BSD-3-Clause

import copy
import functools
import inspect
from typing import Callable
from typing import List
from typing import Optional
Expand All @@ -28,6 +30,19 @@
FIT_AVAILABLE_IDS_FILTERED.remove('pt')


class _EvalCounter:
def __init__(self, fn: Callable):
self._fn = fn
self.count = 0
self.__name__ = getattr(fn, '__name__', self.__class__.__name__)
self.__signature__ = inspect.signature(fn)
functools.update_wrapper(self, fn)

def __call__(self, *args, **kwargs):
self.count += 1
return self._fn(*args, **kwargs)


class Bumps(MinimizerBase):
"""
This is a wrapper to Bumps: https://bumps.readthedocs.io/
Expand All @@ -54,6 +69,7 @@ def __init__(
"""
super().__init__(obj=obj, fit_function=fit_function, minimizer_enum=minimizer_enum)
self._p_0 = {}
self._eval_counter: Optional[_EvalCounter] = None

@staticmethod
def all_methods() -> List[str]:
Expand Down Expand Up @@ -148,7 +164,7 @@ def fit(
try:
model_results = bumps_fit(problem, **method_dict, **minimizer_kwargs, **kwargs)
self._set_parameter_fit_result(model_results, stack_status, problem._parameters)
results = self._gen_fit_results(model_results)
results = self._gen_fit_results(model_results, max_evaluations=max_evaluations)
except Exception as e:
for key in self._cached_pars.keys():
self._cached_pars[key].value = self._cached_pars_vals[key][0]
Expand Down Expand Up @@ -200,7 +216,8 @@ def _make_model(self, parameters: Optional[List[BumpsParameter]] = None) -> Call
:return: Callable to make a bumps Curve model
:rtype: Callable
"""
fit_func = self._generate_fit_function()
fit_func = _EvalCounter(self._generate_fit_function())
self._eval_counter = fit_func

def _outer(obj):
def _make_func(x, y, weights):
Expand Down Expand Up @@ -249,7 +266,12 @@ def _set_parameter_fit_result(
if stack_status:
global_object.stack.endMacro()

def _gen_fit_results(self, fit_results, **kwargs) -> FitResults:
def _gen_fit_results(
self,
fit_results,
max_evaluations: Optional[int] = None,
**kwargs,
) -> FitResults:
"""Convert fit results into the unified `FitResults` format.

:param fit_result: Fit object which contains info on the fit
Expand All @@ -261,7 +283,14 @@ def _gen_fit_results(self, fit_results, **kwargs) -> FitResults:
for name, value in kwargs.items():
if getattr(results, name, False):
setattr(results, name, value)
results.success = fit_results.success
n_evaluations = None if self._eval_counter is None else self._eval_counter.count
stopped_on_budget = (
max_evaluations is not None
and n_evaluations is not None
and n_evaluations >= max_evaluations
)

results.success = fit_results.success and not stopped_on_budget
pars = self._cached_pars
item = {}
for index, name in enumerate(self._cached_model.pars.keys()):
Expand All @@ -275,6 +304,12 @@ def _gen_fit_results(self, fit_results, **kwargs) -> FitResults:
results.y_obs = self._cached_model.y
results.y_calc = self.evaluate(results.x, minimizer_parameters=results.p)
results.y_err = self._cached_model.dy
results.n_evaluations = n_evaluations
results.message = (
f'Fit stopped: reached maximum evaluations ({max_evaluations})'
if stopped_on_budget
else ''
)
# results.residual = results.y_obs - results.y_calc
# results.goodness_of_fit = np.sum(results.residual**2)
results.minimizer_engine = self.__class__
Expand Down
15 changes: 11 additions & 4 deletions src/easyscience/fitting/minimizers/minimizer_dfo.py
Original file line number Diff line number Diff line change
Expand Up @@ -122,6 +122,10 @@
model_results = self._dfo_fit(self._cached_pars, model, **kwargs)
self._set_parameter_fit_result(model_results, stack_status)
results = self._gen_fit_results(model_results, weights)
except FitError:
for key in self._cached_pars.keys():
self._cached_pars[key].value = self._cached_pars_vals[key][0]
raise

Check warning on line 128 in src/easyscience/fitting/minimizers/minimizer_dfo.py

View check run for this annotation

Codecov / codecov/patch

src/easyscience/fitting/minimizers/minimizer_dfo.py#L126-L128

Added lines #L126 - L128 were not covered by tests
except Exception as e:
for key in self._cached_pars.keys():
self._cached_pars[key].value = self._cached_pars_vals[key][0]
Expand Down Expand Up @@ -208,7 +212,7 @@
for name, value in kwargs.items():
if getattr(results, name, False):
setattr(results, name, value)
results.success = not bool(fit_results.flag)
results.success = fit_results.flag == fit_results.EXIT_SUCCESS

pars = {}
for p_name, par in self._cached_pars.items():
Expand All @@ -220,11 +224,14 @@
results.y_obs = self._cached_model.y
results.y_calc = self.evaluate(results.x, minimizer_parameters=results.p)
results.y_err = weights
results.n_evaluations = int(fit_results.nf)
results.message = str(fit_results.msg)
# results.residual = results.y_obs - results.y_calc
# results.goodness_of_fit = fit_results.f

results.minimizer_engine = self.__class__
results.fit_args = None
results.engine_result = fit_results
# results.check_sanity()

return results
Expand Down Expand Up @@ -258,10 +265,10 @@

results = dfols.solve(model, pars_values, bounds=bounds, **kwargs)

if 'Success' not in results.msg:
raise FitError(f'Fit failed with message: {results.msg}')
if results.flag in {results.EXIT_SUCCESS, results.EXIT_MAXFUN_WARNING}:
return results

return results
raise FitError(f'Fit failed with message: {results.msg}')

@staticmethod
def _prepare_kwargs(
Expand Down
2 changes: 2 additions & 0 deletions src/easyscience/fitting/minimizers/minimizer_lmfit.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,6 +298,8 @@ def _gen_fit_results(self, fit_results: ModelResult, **kwargs) -> FitResults:
# results.goodness_of_fit = fit_results.chisqr
results.y_calc = fit_results.best_fit
results.y_err = 1 / fit_results.weights
results.n_evaluations = fit_results.nfev
results.message = fit_results.message
results.minimizer_engine = self.__class__
results.fit_args = None

Expand Down
39 changes: 38 additions & 1 deletion src/easyscience/fitting/minimizers/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ class FitResults:
'y_obs',
'y_calc',
'y_err',
'n_evaluations',
'message',
'engine_result',
'total_results',
]
Expand All @@ -35,9 +37,44 @@ def __init__(self):
self.y_obs = np.ndarray([])
self.y_calc = np.ndarray([])
self.y_err = np.ndarray([])
self.n_evaluations = None
self.message = ''
self.engine_result = None
self.total_results = None

def __repr__(self) -> str:
    """Build a multi-line, human-readable summary of this fit result."""
    engine_label = self.minimizer_engine.__name__ if self.minimizer_engine else None

    # Format the chi2 figures only when both exist and are finite; any
    # failure (missing data, shape mismatch, non-finite values) falls back
    # to the 'N/A' placeholders.
    chi2_text = 'N/A'
    reduced_text = 'N/A'
    try:
        chi2_value = self.chi2
        reduced_value = self.reduced_chi2
        if np.isfinite(chi2_value) and np.isfinite(reduced_value):
            chi2_text = f'{chi2_value:.4g}'
            reduced_text = f'{reduced_value:.4g}'
    except Exception:
        pass

    # len() raises TypeError for unsized x (e.g. a 0-d array placeholder).
    try:
        point_count = len(self.x)
    except TypeError:
        point_count = 0

    summary = [
        f'FitResults(success={self.success}',
        f'    n_pars={self.n_pars}, n_points={point_count}',
        f'    chi2={chi2_text}, reduced_chi2={reduced_text}',
        f'    n_evaluations={self.n_evaluations}',
        f'    minimizer={engine_label}',
    ]
    if self.message:
        summary.append(f"    message='{self.message}'")
    if self.p:
        formatted_pars = ', '.join(f'{k}={v:.4g}' for k, v in self.p.items())
        summary.append(f'    parameters={{{formatted_pars}}}')
    summary.append(')')
    return '\n'.join(summary)

@property
def n_pars(self):
    """Number of free (fitted) parameters: the length of the ``p`` dict."""
    return len(self.p)
Expand All @@ -51,7 +88,7 @@ def chi2(self):
return ((self.residual / self.y_err) ** 2).sum()

@property
def reduced_chi2(self):
    """Chi-squared per degree of freedom: ``chi2 / (n_points - n_pars)``."""
    return self.chi2 / (len(self.x) - self.n_pars)


Expand Down
2 changes: 2 additions & 0 deletions src/easyscience/fitting/multi_fitter.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,8 @@ def _post_compute_reshaping(
current_results.minimizer_engine = fit_result_obj.minimizer_engine
current_results.p = fit_result_obj.p
current_results.p0 = fit_result_obj.p0
current_results.n_evaluations = fit_result_obj.n_evaluations
current_results.message = fit_result_obj.message
current_results.x = this_x
current_results.y_obs = y[idx]
current_results.y_calc = np.reshape(
Expand Down
56 changes: 45 additions & 11 deletions tests/integration/fitting/test_fitter.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ def __call__(self, x: np.ndarray) -> np.ndarray:
def check_fit_results(result, sp_sin, ref_sin, x, **kwargs):
assert result.n_pars == len(sp_sin.get_fit_parameters())
assert result.chi2 == pytest.approx(0, abs=1.5e-3 * (len(result.x) - result.n_pars))
assert result.reduced_chi == pytest.approx(0, abs=1.5e-3)
assert result.reduced_chi2 == pytest.approx(0, abs=1.5e-3)
assert result.success
if 'sp_ref1' in kwargs.keys():
sp_ref1 = kwargs['sp_ref1']
Expand Down Expand Up @@ -207,14 +207,48 @@ def test_basic_max_evaluations(fit_engine):
except AttributeError:
pytest.skip(msg=f'{fit_engine} is not installed')
f.max_evaluations = 3
try:
result = f.fit(x=x, y=y, weights=weights)
# Result should not be the same as the reference
assert sp_sin.phase.value != pytest.approx(ref_sin.phase.value, rel=1e-3)
assert sp_sin.offset.value != pytest.approx(ref_sin.offset.value, rel=1e-3)
except FitError as e:
# DFO throws a different error
assert 'Objective has been called MAXFUN times' in str(e)
result = f.fit(x=x, y=y, weights=weights)
# Result should not be the same as the reference
assert sp_sin.phase.value != pytest.approx(ref_sin.phase.value, rel=1e-3)
assert sp_sin.offset.value != pytest.approx(ref_sin.offset.value, rel=1e-3)


@pytest.mark.fast
@pytest.mark.parametrize(
    'fit_engine',
    [
        None,
        AvailableMinimizers.LMFit,
        AvailableMinimizers.Bumps,
        AvailableMinimizers.DFO,
    ],
)
def test_max_evaluations_populates_fit_result_fields(fit_engine):
    """With a tight budget every engine must return success=False, n_evaluations>0, non-empty message."""
    ref_sin = AbsSin(0.2, np.pi)
    sp_sin = AbsSin(0.354, 3.05)

    x = np.linspace(0, 5, 200)
    weights = np.ones_like(x)
    y = ref_sin(x)

    sp_sin.offset.fixed = False
    sp_sin.phase.fixed = False

    f = Fitter(sp_sin, sp_sin)
    if fit_engine is not None:
        try:
            f.switch_minimizer(fit_engine)
        except AttributeError:
            # Minimizer backend not installed in this environment.
            pytest.skip(msg=f'{fit_engine} is not installed')
    f.max_evaluations = 3
    result = f.fit(x=x, y=y, weights=weights)

    # Exhausting the evaluation budget must be reported as a failed fit
    # with the bookkeeping fields populated.
    assert result.success is False
    assert result.n_evaluations is not None
    assert result.n_evaluations > 0
    assert isinstance(result.message, str)
    assert len(result.message) > 0


@pytest.mark.fast
Expand Down Expand Up @@ -351,7 +385,7 @@ def test_2D_vectorized(fit_engine):
else:
raise e
assert result.n_pars == len(m2.get_fit_parameters())
assert result.reduced_chi == pytest.approx(0, abs=1.5e-3)
assert result.reduced_chi2 == pytest.approx(0, abs=1.5e-3)
assert result.success
assert np.all(result.x == XY)
y_calc_ref = m2(XY)
Expand Down Expand Up @@ -390,7 +424,7 @@ def test_2D_non_vectorized(fit_engine):
else:
raise e
assert result.n_pars == len(m2.get_fit_parameters())
assert result.reduced_chi == pytest.approx(0, abs=1.5e-3)
assert result.reduced_chi2 == pytest.approx(0, abs=1.5e-3)
assert result.success
assert np.all(result.x == XY)
y_calc_ref = m2(XY.reshape(-1, 2))
Expand Down
46 changes: 43 additions & 3 deletions tests/integration/fitting/test_multi_fitter.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,14 +95,54 @@ def test_multi_fit(fit_engine):
sp_sin_2.get_fit_parameters()
)
assert result.chi2 == pytest.approx(0, abs=1.5e-3 * (len(result.x) - result.n_pars))
assert result.reduced_chi == pytest.approx(0, abs=1.5e-3)
assert result.reduced_chi2 == pytest.approx(0, abs=1.5e-3)
assert result.success
assert np.all(result.x == X[idx])
assert np.all(result.y_obs == Y[idx])
assert result.y_calc == pytest.approx(F_ref[idx](X[idx]), abs=1e-2)
assert result.residual == pytest.approx(F_real[idx](X[idx]) - F_ref[idx](X[idx]), abs=1e-2)


@pytest.mark.parametrize('fit_engine', [None, 'LMFit', 'Bumps', 'DFO'])
def test_multi_fit_propagates_n_evaluations_and_message(fit_engine):
    """Verify that n_evaluations and message are copied into each per-dataset result."""
    reference_first = AbsSin(0.2, np.pi)
    reference_second = AbsSin(np.pi * 0.45, 0.45 * np.pi * 0.5)
    sample_first = AbsSin(0.354, 3.05)
    sample_second = AbsSin(1, 0.5)

    # Tie the second model's offset to the first so the fits share a parameter.
    reference_second.offset.make_dependent_on(
        dependency_expression='ref_sin1', dependency_map={'ref_sin1': reference_first.offset}
    )
    sample_second.offset.make_dependent_on(
        dependency_expression='sp_sin1', dependency_map={'sp_sin1': sample_first.offset}
    )

    x_first = np.linspace(0, 5, 200)
    x_second = np.copy(x_first)
    y_first = reference_first(x_first)
    y_second = reference_second(x_second)
    weights = np.ones_like(x_first)

    for parameter in (sample_first.offset, sample_first.phase, sample_second.phase):
        parameter.fixed = False

    fitter = MultiFitter([sample_first, sample_second], [sample_first, sample_second])
    if fit_engine is not None:
        try:
            fitter.switch_minimizer(fit_engine)
        except AttributeError:
            # Minimizer backend not installed in this environment.
            pytest.skip(msg=f'{fit_engine} is not installed')

    all_results = fitter.fit(x=[x_first, x_second], y=[y_first, y_second], weights=[weights, weights])
    for single_result in all_results:
        assert single_result.n_evaluations is not None
        assert isinstance(single_result.n_evaluations, int)
        assert single_result.n_evaluations > 0
        assert isinstance(single_result.message, str)


@pytest.mark.parametrize('fit_engine', [None, 'LMFit', 'Bumps', 'DFO'])
def test_multi_fit2(fit_engine):
ref_sin_1 = AbsSin(0.2, np.pi)
Expand Down Expand Up @@ -160,7 +200,7 @@ def test_multi_fit2(fit_engine):
sp_sin_2.get_fit_parameters()
) + len(sp_line.get_fit_parameters())
assert result.chi2 == pytest.approx(0, abs=1.5e-3 * (len(result.x) - result.n_pars))
assert result.reduced_chi == pytest.approx(0, abs=1.5e-3)
assert result.reduced_chi2 == pytest.approx(0, abs=1.5e-3)
assert result.success
assert np.all(result.x == X[idx])
assert np.all(result.y_obs == Y[idx])
Expand Down Expand Up @@ -235,7 +275,7 @@ def test_multi_fit_1D_2D(fit_engine):
fit_engine != 'DFO'
): # DFO apparently does not fit well with even weights. Can't be bothered to fix
assert result.chi2 == pytest.approx(0, abs=1.5e-3 * (len(result.x) - result.n_pars))
assert result.reduced_chi == pytest.approx(0, abs=1.5e-3)
assert result.reduced_chi2 == pytest.approx(0, abs=1.5e-3)
assert result.y_calc == pytest.approx(F_ref[idx](X[idx]), abs=1e-2)
assert result.residual == pytest.approx(
F_real[idx](X[idx]) - F_ref[idx](X[idx]), abs=1e-2
Expand Down
Loading
Loading