-
Notifications
You must be signed in to change notification settings - Fork 2
Interim updates #232
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
base: develop
Are you sure you want to change the base?
Interim updates #232
Changes from all commits
9d29d8f
2f66a46
2766be8
92036d4
c60bf5a
53a80eb
7858954
665e99e
be61cb9
372de24
4fc6d4b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -4,11 +4,12 @@ | |
| import copy | ||
| from typing import Callable | ||
| from typing import List | ||
| from typing import Optional | ||
|
|
||
| import numpy as np | ||
| from bumps.fitters import FIT_AVAILABLE_IDS | ||
| from bumps.fitters import fit as bumps_fit | ||
| from bumps.fitters import FITTERS | ||
| from bumps.fitters import FitDriver | ||
| from bumps.monitor import Monitor | ||
| from bumps.names import Curve | ||
| from bumps.names import FitProblem | ||
| from bumps.parameter import Parameter as BumpsParameter | ||
|
|
@@ -28,6 +29,40 @@ | |
| FIT_AVAILABLE_IDS_FILTERED.remove('pt') | ||
|
|
||
|
|
||
| class _StepCounterMonitor(Monitor): | ||
| """Lightweight monitor that ensures step count is recorded in | ||
| history. | ||
| """ | ||
|
|
||
| def __init__(self): | ||
| self.last_step = 0 | ||
|
|
||
| def config_history(self, history): | ||
| history.requires(step=1) | ||
|
|
||
| def __call__(self, history): | ||
| self.last_step = int(history.step[0]) | ||
|
rozyczko marked this conversation as resolved.
|
||
|
|
||
|
|
||
| class _BumpsProgressMonitor(Monitor): | ||
| def __init__(self, problem, callback, payload_builder): | ||
| self._problem = problem | ||
| self._callback = callback | ||
| self._payload_builder = payload_builder | ||
|
|
||
| def config_history(self, history): | ||
| history.requires(step=1, point=1, value=1) | ||
|
|
||
| def __call__(self, history): | ||
| payload = self._payload_builder( | ||
| problem=self._problem, | ||
| iteration=int(history.step[0]), | ||
| point=np.asarray(history.point[0]), | ||
| nllf=float(history.value[0]), | ||
|
rozyczko marked this conversation as resolved.
|
||
| ) | ||
| self._callback(payload) | ||
|
|
||
|
|
||
| class Bumps(MinimizerBase): | ||
| """ | ||
| This is a wrapper to Bumps: https://bumps.readthedocs.io/ | ||
|
|
@@ -40,7 +75,7 @@ | |
| self, | ||
| obj, #: ObjBase, | ||
| fit_function: Callable, | ||
| minimizer_enum: Optional[AvailableMinimizers] = None, | ||
| minimizer_enum: AvailableMinimizers | None = None, | ||
| ): # todo after constraint changes, add type hint: obj: ObjBase # noqa: E501 | ||
| """Initialize the fitting engine with a `ObjBase` and an | ||
| arbitrary fitting function. | ||
|
|
@@ -70,16 +105,17 @@ | |
| x: np.ndarray, | ||
| y: np.ndarray, | ||
| weights: np.ndarray, | ||
| model: Optional[Callable] = None, | ||
| parameters: Optional[Parameter] = None, | ||
| method: Optional[str] = None, | ||
| tolerance: Optional[float] = None, | ||
| max_evaluations: Optional[int] = None, | ||
| minimizer_kwargs: Optional[dict] = None, | ||
| engine_kwargs: Optional[dict] = None, | ||
| model: Callable | None = None, | ||
| parameters: List[Parameter] | None = None, | ||
| method: str | None = None, | ||
| tolerance: float | None = None, | ||
| max_evaluations: int | None = None, | ||
| progress_callback: Callable[[dict], bool | None] | None = None, | ||
| minimizer_kwargs: dict | None = None, | ||
| engine_kwargs: dict | None = None, | ||
| **kwargs, | ||
| ) -> FitResults: | ||
| """Perform a fit using the lmfit engine. | ||
| """Perform a fit using the BUMPS engine. | ||
|
|
||
| :param x: points to be calculated at | ||
| :type x: np.ndarray | ||
|
|
@@ -88,17 +124,14 @@ | |
| :param weights: Weights for supplied measured points | ||
| :type weights: np.ndarray | ||
| :param model: Optional Model which is being fitted to | ||
| :type model: lmModel | ||
| :param parameters: Optional parameters for the fit | ||
| :type parameters: List[BumpsParameter] | ||
| :param kwargs: Additional arguments for the fitting function. | ||
| :param method: Method for minimization | ||
| :type method: str | ||
| :param progress_callback: Optional callback for progress updates | ||
| :type progress_callback: Callable | ||
| :return: Fit results | ||
| :rtype: ModelResult For standard least squares, the weights | ||
| should be 1/sigma, where sigma is the standard deviation of | ||
| the measurement. For unweighted least squares, these should | ||
| be 1. | ||
| :rtype: FitResults | ||
| """ | ||
| method_dict = self._get_method_kwargs(method) | ||
|
|
||
|
|
@@ -139,23 +172,82 @@ | |
| self._p_0 = {f'p{key}': self._cached_pars[key].value for key in self._cached_pars.keys()} | ||
|
|
||
| problem = FitProblem(model) | ||
|
|
||
| method_str = method_dict.get('method', self._method) | ||
| fitclass = self._resolve_fitclass(method_str) | ||
|
|
||
| step_counter = _StepCounterMonitor() | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Should we always monitor the fit? Doesn't this slow down the fit? This should probably be optional so that it can be skipped when run in the library.
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Not all monitoring here is the same. Here, we always attach only the lightweight
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. We don't always monitor full fit progress; we always attach a minimal step counter because the wrapper currently needs iteration count for FitResults. That should have negligible overhead relative to actual model evaluations. |
||
| monitors = [step_counter] | ||
| if progress_callback is not None: | ||
| if not callable(progress_callback): | ||
| raise ValueError('progress_callback must be callable') | ||
| monitors.append( | ||
| _BumpsProgressMonitor(problem, progress_callback, self._build_progress_payload) | ||
| ) | ||
|
|
||
| driver = FitDriver( | ||
| fitclass=fitclass, | ||
| problem=problem, | ||
| monitors=monitors, | ||
| **minimizer_kwargs, | ||
| ) | ||
|
rozyczko marked this conversation as resolved.
|
||
| driver.clip() | ||
|
|
||
| # Why do we do this? Because a fitting template has to have global_object instantiated outside pre-runtime | ||
| from easyscience import global_object | ||
|
|
||
| stack_status = global_object.stack.enabled | ||
| global_object.stack.enabled = False | ||
|
|
||
| try: | ||
| model_results = bumps_fit(problem, **method_dict, **minimizer_kwargs, **kwargs) | ||
| self._set_parameter_fit_result(model_results, stack_status, problem._parameters) | ||
| results = self._gen_fit_results(model_results) | ||
| x_result, fx = driver.fit() | ||
| self._set_parameter_fit_result(x_result, driver, stack_status, problem._parameters) | ||
| results = self._gen_fit_results( | ||
| x_result, fx, driver, step_counter.last_step, max_evaluations | ||
| ) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Have you checked that the generated results look the same now as they did with the simplified fit interface? Because the simplified interface was supposed to make a
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The proposed public API is Have we verified that the public |
||
| except Exception as e: | ||
| for key in self._cached_pars.keys(): | ||
| self._cached_pars[key].value = self._cached_pars_vals[key][0] | ||
| self._restore_parameter_values() | ||
| raise FitError(e) | ||
| finally: | ||
| global_object.stack.enabled = stack_status | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why add this now? Does this fix something that was broken before? Wasn't the stack already broken?
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes, if the exception fired, the undo stack was not properly reset. |
||
| return results | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Just adding my code here from my attempt at providing a proper error when the fit doesn't succeed. I had this block added just before # See: https://github.com/bumps/bumps/blob/master/bumps/fitters.py#L625 for the rationale behind this code.
if max_evaluations is None:
for bumps_fitter in FITTERS:
if bumps_fitter.id == self._method:
max_evaluations = bumps_fitter.settings[0][1] # This is the default number of steps for the method
break
if tolerance is None:
for bumps_fitter in FITTERS:
if bumps_fitter.id == self._method:
tolerance = min(bumps_fitter.settings[1][1], bumps_fitter.settings[2][1]) # These are ftol and xtol
break
if results.iterations == max_evaluations-1:
warnings.warn(f'Fit did not reach the desired tolerance of {tolerance} within the maximum number of evaluations of {max_evaluations}. ' # noqa: E501
f'Consider increasing the maximum number of evaluations or adjusting the tolerance.')
results.success = False |
||
|
|
||
| def convert_to_pars_obj(self, par_list: Optional[List] = None) -> List[BumpsParameter]: | ||
| @staticmethod | ||
| def _resolve_fitclass(method: str): | ||
| for fitclass in FITTERS: | ||
| if fitclass.id == method: | ||
| return fitclass | ||
| raise FitError(f'Unknown BUMPS fitting method: {method}') | ||
|
|
||
| def _build_progress_payload( | ||
| self, problem, iteration: int, point: np.ndarray, nllf: float | ||
| ) -> dict: | ||
| # Use the nllf already computed by the fitter to avoid a costly | ||
| # model re-evaluation, and let BUMPS apply its own chisq scaling. | ||
| chi2 = float(problem.chisq(nllf=nllf, norm=False)) | ||
| reduced_chi2 = float(problem.chisq(nllf=nllf, norm=True)) | ||
|
|
||
| parameter_values = self._current_parameter_snapshot(problem, point) | ||
|
|
||
| return { | ||
| 'iteration': iteration, | ||
| 'chi2': chi2, | ||
| 'reduced_chi2': reduced_chi2, | ||
| 'parameter_values': parameter_values, | ||
| 'refresh_plots': False, | ||
| 'finished': False, | ||
| } | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. What exactly are you trying to accomplish? Is the callback supposed to abort a fit when a certain parameter has reached a certain value?
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes, BUMPS knows how to compute reduced chi2 |
||
|
|
||
| def _current_parameter_snapshot(self, problem, point: np.ndarray) -> dict: | ||
| labels = problem.labels() | ||
| values = problem.getp() if point is None else point | ||
| snapshot = {} | ||
| for label, value in zip(labels, values): | ||
| dict_name = label[len(MINIMIZER_PARAMETER_PREFIX) :] | ||
| snapshot[dict_name] = float(value) | ||
| return snapshot | ||
|
|
||
| def convert_to_pars_obj(self, par_list: List[Parameter] | None = None) -> List[BumpsParameter]: | ||
| """Create a container with the `Parameters` converted from the | ||
| base object. | ||
|
|
||
|
|
@@ -190,7 +282,7 @@ | |
| fixed=obj.fixed, | ||
| ) | ||
|
|
||
| def _make_model(self, parameters: Optional[List[BumpsParameter]] = None) -> Callable: | ||
| def _make_model(self, parameters: List[BumpsParameter] | None = None) -> Callable: | ||
| """Generate a bumps model from the supplied `fit_function` and | ||
| parameters in the base object. Note that this makes a callable | ||
| as it needs to be initialized with *x*, *y*, *weights* | ||
|
|
@@ -222,37 +314,53 @@ | |
| return _outer(self) | ||
|
|
||
| def _set_parameter_fit_result( | ||
| self, fit_result, stack_status: bool, par_list: List[BumpsParameter] | ||
| self, | ||
| x_result: np.ndarray, | ||
| driver: FitDriver, | ||
| stack_status: bool, | ||
| par_list: List[BumpsParameter], | ||
| ): | ||
| """Update parameters to their final values and assign a std | ||
| error to them. | ||
|
|
||
| :param fit_result: Fit object which contains info on the fit | ||
| :return: None | ||
| :rtype: noneType | ||
| :param x_result: Optimized parameter values from FitDriver | ||
| :param driver: The FitDriver instance (provides stderr) | ||
| :param stack_status: Whether the undo stack was enabled | ||
| :param par_list: List of BUMPS parameter objects | ||
| """ | ||
| from easyscience import global_object | ||
|
|
||
| pars = self._cached_pars | ||
| stderr = driver.stderr() | ||
|
|
||
| if stack_status: | ||
| for name in pars.keys(): | ||
| pars[name].value = self._cached_pars_vals[name][0] | ||
| pars[name].error = self._cached_pars_vals[name][1] | ||
| self._restore_parameter_values() | ||
| global_object.stack.enabled = True | ||
| global_object.stack.beginMacro('Fitting routine') | ||
|
|
||
| for index, name in enumerate([par.name for par in par_list]): | ||
| dict_name = name[len(MINIMIZER_PARAMETER_PREFIX) :] | ||
| pars[dict_name].value = fit_result.x[index] | ||
| pars[dict_name].error = fit_result.dx[index] | ||
| pars[dict_name].value = x_result[index] | ||
| pars[dict_name].error = stderr[index] | ||
| if stack_status: | ||
| global_object.stack.endMacro() | ||
|
|
||
| def _gen_fit_results(self, fit_results, **kwargs) -> FitResults: | ||
| def _gen_fit_results( | ||
| self, | ||
| x_result: np.ndarray, | ||
| fx: float, | ||
| driver: FitDriver, | ||
| n_evaluations: int = 0, | ||
| max_evaluations: int | None = None, | ||
| **kwargs, | ||
| ) -> FitResults: | ||
| """Convert fit results into the unified `FitResults` format. | ||
|
|
||
| :param fit_result: Fit object which contains info on the fit | ||
| :param x_result: Optimized parameter values from FitDriver | ||
| :param fx: Final objective function value | ||
| :param driver: The FitDriver instance | ||
| :param n_evaluations: Number of iterations completed | ||
| :param max_evaluations: Maximum evaluations budget (if set) | ||
| :return: fit results container | ||
| :rtype: FitResults | ||
| """ | ||
|
|
@@ -261,12 +369,19 @@ | |
| for name, value in kwargs.items(): | ||
| if getattr(results, name, False): | ||
| setattr(results, name, value) | ||
| results.success = fit_results.success | ||
| results.n_evaluations = n_evaluations | ||
|
rozyczko marked this conversation as resolved.
|
||
| # Bumps step counter is 0-indexed, so the last step of a budget of N | ||
| # is N-1. We therefore compare with ``max_evaluations - 1``. | ||
| if max_evaluations is not None and n_evaluations >= max_evaluations - 1: | ||
| results.success = False | ||
| results.message = f'Maximum number of evaluations ({max_evaluations}) reached' | ||
| else: | ||
| results.success = True | ||
| results.message = 'Optimization terminated successfully' | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Member
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Will address it in the other PR and merge with this here. |
||
| pars = self._cached_pars | ||
| item = {} | ||
| for index, name in enumerate(self._cached_model.pars.keys()): | ||
| dict_name = name[len(MINIMIZER_PARAMETER_PREFIX) :] | ||
|
|
||
| item[name] = pars[dict_name].value | ||
|
|
||
| results.p0 = self._p_0 | ||
|
|
@@ -275,10 +390,7 @@ | |
| results.y_obs = self._cached_model.y | ||
| results.y_calc = self.evaluate(results.x, minimizer_parameters=results.p) | ||
| results.y_err = self._cached_model.dy | ||
| # results.residual = results.y_obs - results.y_calc | ||
| # results.goodness_of_fit = np.sum(results.residual**2) | ||
| results.minimizer_engine = self.__class__ | ||
| results.fit_args = None | ||
| results.engine_result = fit_results | ||
| # results.check_sanity() | ||
| results.engine_result = driver | ||
| return results | ||
Uh oh!
There was an error while loading. Please reload this page.