Skip to content

fitting

calculators

interface_factory

InterfaceFactoryTemplate

This class allows for the creation and transference of interfaces.

Source code in src/easyscience/fitting/calculators/interface_factory.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
class InterfaceFactoryTemplate:
    """This class allows for the creation and transference of
    interfaces.

    The factory holds a list of interface (calculator) classes, tracks
    which one is currently selected, and owns the single live interface
    instance.  The live instance is retrieved by calling the factory
    object itself (``obj()``).
    """

    def __init__(self, interface_list: List[ABCMeta], *args, **kwargs):
        """Store the available interface classes and immediately create
        an interface instance via :meth:`create` (the first class in
        ``interface_list`` unless ``interface_name`` is given in kwargs).

        :param interface_list: interface classes this factory can build
        """
        self._interfaces: List[ABCMeta] = interface_list
        self._current_interface: ABCMeta
        # Name-mangled to _InterfaceFactoryTemplate__interface_obj; holds the
        # live instance of the currently selected interface class.
        self.__interface_obj: Optional[object] = None
        self.create(*args, **kwargs)

    def create(self, *args, **kwargs):
        """Create an interface to a calculator from those initialized.
        Interfaces can be selected by `interface_name` where
        `interface_name` is one of `obj.available_interfaces`. This
        interface can now be accessed by obj().

        :param interface_name: name of interface to be created
        :type interface_name: str
        :return: None
        :rtype: None
        """
        if kwargs.get('interface_name', None) is None:
            if len(self._interfaces) > 0:
                # Fallback name
                interface_name = self.return_name(self._interfaces[0])
            else:
                # No interface classes were registered at construction time.
                raise NotImplementedError
        else:
            interface_name = kwargs.pop('interface_name')
        interfaces = self.available_interfaces
        if interface_name in interfaces:
            self._current_interface = self._interfaces[interfaces.index(interface_name)]
        # NOTE(review): if interface_name is not in `interfaces` the previous
        # selection is silently reused (or an AttributeError surfaces when none
        # was ever set) — unlike switch(), which raises explicitly. Confirm
        # this fall-through is intended.
        self.__interface_obj = self._current_interface(*args, **kwargs)

    def switch(self, new_interface: str, fitter: Optional[Type[Fitter]] = None):
        """Changes the current interface to a new interface. The current
        interface is destroyed and all SerializerComponent parameters
        carried over to the new interface. i.e. pick up where you left
        off.

        :param new_interface: name of new interface to be created
        :type new_interface: str
        :param fitter: Fitting interface which contains the fitting
            object which may have bindings which will be updated.
        :type fitter: EasyScience.fitting.Fitter
        :return: None
        :rtype: None
        """
        interfaces = self.available_interfaces
        if new_interface in interfaces:
            self._current_interface = self._interfaces[interfaces.index(new_interface)]
            # The new interface is constructed with no arguments; any state is
            # expected to be re-established through the binding refresh below.
            self.__interface_obj = self._current_interface()
        else:
            raise AttributeError('The user supplied interface is not valid.')
        if fitter is not None:
            # Best-effort binding refresh: failures are reported, not raised.
            if hasattr(fitter, '_fit_object'):
                obj = getattr(fitter, '_fit_object')
                try:
                    if hasattr(obj, 'update_bindings'):
                        obj.update_bindings()
                except Exception as e:
                    print(f'Unable to auto generate bindings.\n{e}')
            elif hasattr(fitter, 'generate_bindings'):
                try:
                    fitter.generate_bindings()
                except Exception as e:
                    print(f'Unable to auto generate bindings.\n{e}')

    @property
    def available_interfaces(self) -> List[str]:
        """Return all available interfaces.

        :return: List of available interface names
        :rtype: List[str]
        """
        return [self.return_name(this_interface) for this_interface in self._interfaces]

    @property
    def current_interface(self) -> ABCMeta:
        """Returns the constructor for the currently selected interface.

        :return: Interface constructor
        :rtype: InterfaceTemplate
        """
        return self._current_interface

    @property
    def current_interface_name(self) -> str:
        """Returns the constructor name for the currently selected
        interface.

        :return: Interface constructor name
        :rtype: str
        """
        return self.return_name(self._current_interface)

    @property
    def fit_func(
        self,
    ) -> Callable:  # , x_array: np.ndarray, *args, **kwargs) -> np.ndarray:
        """Pass through to the underlying interface's fitting function.

        :param x_array: points to be calculated at
        :type x_array: np.ndarray
        :param args: positional arguments for the fitting function
        :type args: Any
        :param kwargs: key/value pair arguments for the fitting function.
        :type kwargs: Any
        :return: points calculated at positional values `x`
        :rtype: np.ndarray
        #
        """

        # Closure late-binds to the interface object, so a stored fit_func
        # keeps working after create()/switch() replaces the instance.
        def __fit_func(*args, **kwargs):
            return self.__interface_obj.fit_func(*args, **kwargs)

        return __fit_func

    def call(self, *args, **kwargs):
        # Convenience pass-through to the wrapped interface fit function.
        return self.fit_func(*args, **kwargs)

    def generate_bindings(self, model, *args, ifun=None, **kwargs):
        """Automatically bind a `Parameter` to the corresponding
        interface.

        :param name: parameter name
        :type name: str
        :return: binding property
        :rtype: property
        """

        class_links = self.__interface_obj.create(model)
        props = model._get_linkable_attributes()
        props_names = [prop.name for prop in props]
        for item in class_links:
            for item_key in item.name_conversion.keys():
                if item_key not in props_names:
                    continue
                idx = props_names.index(item_key)
                prop = props[idx]

                # Should be fetched this way to ensure we don't get value from callback
                if hasattr(prop, 'value_no_call_back'):
                    # Property object
                    prop_value = prop.value_no_call_back
                else:
                    # Descriptor object
                    prop_value = prop.value

                # Install the interface-backed property and push the current
                # value through its setter so both sides start in sync.
                prop._callback = item.make_prop(item_key)
                prop._callback.fset(prop_value)

    def __call__(self, *args, **kwargs):
        """Return the live interface object (arguments are ignored)."""
        return self.__interface_obj

    def __reduce__(self):
        """Pickle support: reduce to the factory class plus the current
        interface name. The live interface object itself is not pickled;
        it is rebuilt on restore by :meth:`__state_restore__`.
        """
        return (
            self.__state_restore__,
            (
                self.__class__,
                self.current_interface_name,
            ),
        )

    @staticmethod
    def __state_restore__(cls, interface_str):
        # Rebuild a default factory, then switch to the pickled interface if
        # it is still available in this environment.
        obj = cls()
        if interface_str in obj.available_interfaces:
            obj.switch(interface_str)
        return obj

    @staticmethod
    def return_name(this_interface) -> str:
        """Return an interface's name: the class-level ``name`` attribute
        if defined, otherwise the class ``__name__``."""
        interface_name = this_interface.__name__
        if hasattr(this_interface, 'name'):
            interface_name = getattr(this_interface, 'name')
        return interface_name
available_interfaces property

Return all available interfaces.

:return: List of available interface names :rtype: List[str]

create(*args, **kwargs)

Create an interface to a calculator from those initialized. Interfaces can be selected by interface_name where interface_name is one of obj.available_interfaces. This interface can now be accessed by obj().

:param interface_name: name of interface to be created :type interface_name: str :return: None :rtype: noneType

Source code in src/easyscience/fitting/calculators/interface_factory.py
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
def create(self, *args, **kwargs):
    """Create an interface to a calculator from those initialized.
    Interfaces can be selected by `interface_name` where
    `interface_name` is one of `obj.available_interfaces`. This
    interface can now be accessed by obj().

    :param interface_name: name of interface to be created
    :type interface_name: str
    :return: None
    :rtype: noneType
    """
    if kwargs.get('interface_name', None) is None:
        if len(self._interfaces) > 0:
            # Fallback name
            interface_name = self.return_name(self._interfaces[0])
        else:
            raise NotImplementedError
    else:
        interface_name = kwargs.pop('interface_name')
    interfaces = self.available_interfaces
    if interface_name in interfaces:
        self._current_interface = self._interfaces[interfaces.index(interface_name)]
    self.__interface_obj = self._current_interface(*args, **kwargs)
current_interface property

Returns the constructor for the currently selected interface.

:return: Interface constructor :rtype: InterfaceTemplate

current_interface_name property

Returns the constructor name for the currently selected interface.

:return: Interface constructor name :rtype: str

fit_func property

Pass through to the underlying interfaces fitting function.

:param x_array: points to be calculated at :type x_array: np.ndarray :param args: positional arguments for the fitting function :type args: Any :param kwargs: key/value pair arguments for the fitting function. :type kwargs: Any :return: points calculated at positional values x :rtype: np.ndarray

generate_bindings(model, *args, ifun=None, **kwargs)

Automatically bind a Parameter to the corresponding interface.

:param name: parameter name :type name: str :return: binding property :rtype: property

Source code in src/easyscience/fitting/calculators/interface_factory.py
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
def generate_bindings(self, model, *args, ifun=None, **kwargs):
    """Automatically bind a `Parameter` to the corresponding
    interface.

    :param name: parameter name
    :type name: str
    :return: binding property
    :rtype: property
    """

    class_links = self.__interface_obj.create(model)
    props = model._get_linkable_attributes()
    props_names = [prop.name for prop in props]
    for item in class_links:
        for item_key in item.name_conversion.keys():
            if item_key not in props_names:
                continue
            idx = props_names.index(item_key)
            prop = props[idx]

            # Should be fetched this way to ensure we don't get value from callback
            if hasattr(prop, 'value_no_call_back'):
                # Property object
                prop_value = prop.value_no_call_back
            else:
                # Descriptor object
                prop_value = prop.value

            prop._callback = item.make_prop(item_key)
            prop._callback.fset(prop_value)
return_name(this_interface) staticmethod

Return an interface's name.

Source code in src/easyscience/fitting/calculators/interface_factory.py
191
192
193
194
195
196
197
@staticmethod
def return_name(this_interface) -> str:
    """Return an interfaces name."""
    interface_name = this_interface.__name__
    if hasattr(this_interface, 'name'):
        interface_name = getattr(this_interface, 'name')
    return interface_name
switch(new_interface, fitter=None)

Changes the current interface to a new interface. The current interface is destroyed and all SerializerComponent parameters carried over to the new interface. i.e. pick up where you left off.

:param new_interface: name of new interface to be created :type new_interface: str :param fitter: Fitting interface which contains the fitting object which may have bindings which will be updated. :type fitter: EasyScience.fitting.Fitter :return: None :rtype: noneType

Source code in src/easyscience/fitting/calculators/interface_factory.py
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
def switch(self, new_interface: str, fitter: Optional[Type[Fitter]] = None):
    """Changes the current interface to a new interface. The current
    interface is destroyed and all SerializerComponent parameters
    carried over to the new interface. i.e. pick up where you left
    off.

    :param new_interface: name of new interface to be created
    :type new_interface: str
    :param fitter: Fitting interface which contains the fitting
        object which may have bindings which will be updated.
    :type fitter: EasyScience.fitting.Fitter
    :return: None
    :rtype: noneType
    """
    interfaces = self.available_interfaces
    if new_interface in interfaces:
        self._current_interface = self._interfaces[interfaces.index(new_interface)]
        self.__interface_obj = self._current_interface()
    else:
        raise AttributeError('The user supplied interface is not valid.')
    if fitter is not None:
        if hasattr(fitter, '_fit_object'):
            obj = getattr(fitter, '_fit_object')
            try:
                if hasattr(obj, 'update_bindings'):
                    obj.update_bindings()
            except Exception as e:
                print(f'Unable to auto generate bindings.\n{e}')
        elif hasattr(fitter, 'generate_bindings'):
            try:
                fitter.generate_bindings()
            except Exception as e:
                print(f'Unable to auto generate bindings.\n{e}')

fitter

Fitter

Fitter is a class which makes it possible to undertake fitting utilizing one of the supported minimizers.

Source code in src/easyscience/fitting/fitter.py
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
class Fitter:
    """Fitter is a class which makes it possible to undertake fitting
    utilizing one of the supported minimizers.
    """

    def __init__(self, fit_object, fit_function: Callable):
        """:param fit_object: The EasyScience model object to be fitted.
        :param fit_function: The function to be optimized against.
        """
        self._fit_object = fit_object
        self._fit_function = fit_function
        # Shape of the independent data from the most recent fit (a tuple,
        # recorded inside `fit`), not an int as previously annotated.
        self._dependent_dims: Optional[tuple] = None
        self._tolerance: Optional[float] = None
        self._max_evaluations: Optional[int] = None

        self._minimizer: Optional[MinimizerBase] = None  # set in _update_minimizer
        self._enum_current_minimizer: Optional[AvailableMinimizers] = None  # set in _update_minimizer
        self._update_minimizer(DEFAULT_MINIMIZER)

    def make_model(self, pars=None) -> Callable:
        """Pass through to the current minimizer's model factory."""
        return self._minimizer.make_model(pars)

    def evaluate(self, pars=None) -> np.ndarray:
        """Evaluate the current model with the current minimizer."""
        return self._minimizer.evaluate(pars)

    def convert_to_pars_obj(self, pars) -> object:
        """Convert parameters to the current minimizer's native object."""
        return self._minimizer.convert_to_pars_obj(pars)

    # TODO: remove this method when we are ready to adjust the dependent products
    def initialize(self, fit_object, fit_function: Callable) -> None:
        """Set the model and callable in the calculator interface.

        :param fit_object: The EasyScience model object
        :param fit_function: The function to be optimized against.
        """
        self._fit_object = fit_object
        self._fit_function = fit_function
        self._update_minimizer(DEFAULT_MINIMIZER)

    # TODO: remove this method when we are ready to adjust the dependent products
    def create(self, minimizer_enum: Union[AvailableMinimizers, str] = DEFAULT_MINIMIZER) -> None:
        """Create the required minimizer.

        :param minimizer_enum: The enum of the minimization engine to
            create.
        """
        if isinstance(minimizer_enum, str):
            print(f'minimizer should be set with enum {minimizer_enum}')
            minimizer_enum = from_string_to_enum(minimizer_enum)
        self._update_minimizer(minimizer_enum)

    def switch_minimizer(self, minimizer_enum: Union[AvailableMinimizers, str]) -> None:
        """Switch minimizer and initialize.

        :param minimizer_enum: The enum of the minimizer to create and
            instantiate.
        """
        if isinstance(minimizer_enum, str):
            print(f'minimizer should be set with enum {minimizer_enum}')
            minimizer_enum = from_string_to_enum(minimizer_enum)

        self._update_minimizer(minimizer_enum)

    def _update_minimizer(self, minimizer_enum: AvailableMinimizers) -> None:
        """Rebuild the minimizer from the current model and fit function."""
        self._minimizer = factory(
            minimizer_enum=minimizer_enum,
            fit_object=self._fit_object,
            fit_function=self.fit_function,
        )
        self._enum_current_minimizer = minimizer_enum

    @property
    def available_minimizers(self) -> List[str]:
        """Get a list of the names of available fitting minimizers.

        :return: List of available fitting minimizers
        :rtype: List[str]
        """
        return [minimize.name for minimize in AvailableMinimizers]

    @property
    def minimizer(self) -> MinimizerBase:
        """Get the current fitting minimizer object.

        :return:
        :rtype: MinimizerBase
        """
        return self._minimizer

    @property
    def tolerance(self) -> float:
        """Get the tolerance for the minimizer.

        :return: Tolerance for the minimizer
        """
        return self._tolerance

    @tolerance.setter
    def tolerance(self, tolerance: float) -> None:
        """Set the tolerance for the minimizer.

        :param tolerance: Tolerance for the minimizer
        """
        self._tolerance = tolerance

    @property
    def max_evaluations(self) -> int:
        """Get the maximal number of evaluations for the minimizer.

        :return: Maximal number of steps for the minimizer
        """
        return self._max_evaluations

    @max_evaluations.setter
    def max_evaluations(self, max_evaluations: int) -> None:
        """Set the maximal number of evaluations for the minimizer.

        :param max_evaluations: Maximal number of steps for the
            minimizer
        """
        self._max_evaluations = max_evaluations

    @property
    def fit_function(self) -> Callable:
        """The raw fit function that the optimizer will call (no
        wrapping) :return: Raw fit function.
        """
        return self._fit_function

    @fit_function.setter
    def fit_function(self, fit_function: Callable) -> None:
        """Set the raw fit function to a new one.

        :param fit_function: New fit function
        :return: None
        """
        self._fit_function = fit_function
        # The minimizer captured the old callable, so it must be rebuilt.
        self._update_minimizer(self._enum_current_minimizer)

    @property
    def fit_object(self):
        """The EasyScience object which will be used as a model :return:

        EasyScience Model.
        """
        return self._fit_object

    @fit_object.setter
    def fit_object(self, fit_object) -> None:
        """Set the EasyScience object which wil be used as a model
        :param fit_object: New EasyScience object :return: None.
        """
        self._fit_object = fit_object
        self._update_minimizer(self._enum_current_minimizer)

    def _fit_function_wrapper(self, real_x=None, flatten: bool = True) -> Callable:
        """Simple fit function which injects the real X (independent)
        values into the optimizer function.

        This will also flatten the results if needed.
        :param real_x: Independent x parameters to be injected
        :param flatten: Should the result be a flat 1D array?
        :return: Wrapped optimizer function.
        """
        fun = self._fit_function

        @functools.wraps(fun)
        def wrapped_fit_function(x, **kwargs):
            if real_x is not None:
                # Ignore the optimizer's dummy x and use the true independents.
                x = real_x
            dependent = fun(x, **kwargs)
            if flatten:
                dependent = dependent.flatten()
            return dependent

        return wrapped_fit_function

    @property
    def fit(self) -> Callable:
        """Property which wraps the current `fit` function from the
        fitting interface.

        This property returns a wrapped fit function which converts the
        input data into the correct shape for the optimizer, wraps the
        fit function to re-constitute the independent variables and,
        once the fit is completed, reshapes the outputs to those
        expected.
        """

        @functools.wraps(self._minimizer.fit)
        def inner_fit_callable(
            x: np.ndarray,
            y: np.ndarray,
            weights: Optional[np.ndarray] = None,
            vectorized: bool = False,
            progress_callback: Callable[[dict], bool | None] | None = None,
            **kwargs,
        ) -> FitResults:
            """This is a wrapped callable which performs the actual
            fitting. It is split into.

            3 sections, PRE/ FIT/ POST.
            - PRE = Reshaping the input data into the correct dimensions for the optimizer
            - FIT = Wrapping the fit function and performing the fit
            - POST = Reshaping the outputs so it is coherent with the inputs.
            """
            # Precompute - Reshape all independents into the correct dimensionality
            x_fit, x_new, y_new, weights, dims = self._precompute_reshaping(
                x, y, weights, vectorized
            )
            self._dependent_dims = dims

            # Fit
            fit_fun_org = self._fit_function
            fit_fun_wrap = self._fit_function_wrapper(
                x_new, flatten=True
            )  # This should be wrapped.
            self.fit_function = fit_fun_wrap
            f_res = self._minimizer.fit(
                x_fit,
                y_new,
                weights=weights,
                tolerance=self._tolerance,
                max_evaluations=self._max_evaluations,
                progress_callback=progress_callback,
                **kwargs,
            )

            # Postcompute
            fit_result = self._post_compute_reshaping(f_res, x, y)
            # Reset the function
            self.fit_function = fit_fun_org
            return fit_result

        return inner_fit_callable

    @staticmethod
    def _precompute_reshaping(
        x: np.ndarray,
        y: np.ndarray,
        weights: Optional[np.ndarray],
        vectorized: bool,
    ):
        """Check the dimensions of the inputs and reshape if necessary.

        :param x: ND matrix of dependent points
        :param y: N-1D matrix of independent points
        :param vectorized: Is the x data vectorized, i.e. [NxMx...x Ndims]?
        :return: Tuple of (dummy x for the optimizer, reshaped x,
            flattened y, flattened weights, original x shape)
        """
        # Make sure that they are np arrays
        x_new = np.array(x)
        y_new = np.array(y)
        # Get the shape
        x_shape = x_new.shape
        # Check if the x data is 1D
        if len(x_shape) > 1:
            # It is ND data
            # Check if the data is vectorized. i.e. should x be [NxMx...x Ndims]
            if vectorized:
                # Assert that the shapes are the same
                if np.all(x_shape[:-1] != y_new.shape):
                    raise ValueError('The shape of the x and y data must be the same')
                # If so do nothing but note that the data is vectorized
                # x_shape = (-1,) # Should this be done?
            else:
                # Assert that the shapes are the same
                if np.prod(x_new.shape[:-1]) != y_new.size:
                    raise ValueError('The number of elements in x and y data must be the same')
                # Reshape the data to be [len(NxMx..), Ndims] i.e. flatten to columns
                x_new = x_new.reshape(-1, x_shape[-1], order='F')
        else:
            # Assert that the shapes are the same
            if np.all(x_shape != y_new.shape):
                raise ValueError('The shape of the x and y data must be the same')
            # It is 1D data
            # BUG FIX: flatten the converted array, not the raw input — `x`
            # may be a plain list/tuple which has no `.flatten()` method.
            x_new = x_new.flatten()
        # The optimizer needs a 1D array, flatten the y data
        y_new = y_new.flatten()
        if weights is not None:
            weights = np.array(weights).flatten()
        # Make a 'dummy' x array for the fit function
        x_for_fit = np.array(range(y_new.size))
        return x_for_fit, x_new, y_new, weights, x_shape

    @staticmethod
    def _post_compute_reshaping(
        fit_result: FitResults, x: np.ndarray, y: np.ndarray
    ) -> FitResults:
        """Reshape the output of the fitter into the correct dimensions.

        :param fit_result: Output from the fitter
        :param x: Input x independent
        :param y: Input y dependent
        :return: Reshaped Fit Results
        """
        fit_result.x = x
        fit_result.y_obs = y
        fit_result.y_calc = np.reshape(fit_result.y_calc, y.shape)
        fit_result.y_err = np.reshape(fit_result.y_err, y.shape)
        return fit_result

available_minimizers property

Get a list of the names of available fitting minimizers.

:return: List of available fitting minimizers :rtype: List[str]

create(minimizer_enum=DEFAULT_MINIMIZER)

Create the required minimizer.

:param minimizer_enum: The enum of the minimization engine to create.

Source code in src/easyscience/fitting/fitter.py
58
59
60
61
62
63
64
65
66
67
def create(self, minimizer_enum: Union[AvailableMinimizers, str] = DEFAULT_MINIMIZER) -> None:
    """Create the required minimizer.

    :param minimizer_enum: The enum of the minimization engine to
        create.
    """
    if isinstance(minimizer_enum, str):
        print(f'minimizer should be set with enum {minimizer_enum}')
        minimizer_enum = from_string_to_enum(minimizer_enum)
    self._update_minimizer(minimizer_enum)

fit property

Property which wraps the current fit function from the fitting interface.

This property returns a wrapped fit function which converts the input data into the correct shape for the optimizer, wraps the fit function to re-constitute the independent variables and, once the fit is completed, reshapes the outputs to those expected.

fit_function property writable

The raw fit function that the optimizer will call (no wrapping) :return: Raw fit function.

fit_object property writable

The EasyScience object which will be used as a model :return:

EasyScience Model.

initialize(fit_object, fit_function)

Set the model and callable in the calculator interface.

:param fit_object: The EasyScience model object :param fit_function: The function to be optimized against.

Source code in src/easyscience/fitting/fitter.py
47
48
49
50
51
52
53
54
55
def initialize(self, fit_object, fit_function: Callable) -> None:
    """Set the model and callable in the calculator interface.

    :param fit_object: The EasyScience model object
    :param fit_function: The function to be optimized against.
    """
    self._fit_object = fit_object
    self._fit_function = fit_function
    self._update_minimizer(DEFAULT_MINIMIZER)

max_evaluations property writable

Get the maximal number of evaluations for the minimizer.

:return: Maximal number of steps for the minimizer

minimizer property

Get the current fitting minimizer object.

:return: :rtype: MinimizerBase

switch_minimizer(minimizer_enum)

Switch minimizer and initialize.

:param minimizer_enum: The enum of the minimizer to create and instantiate.

Source code in src/easyscience/fitting/fitter.py
69
70
71
72
73
74
75
76
77
78
79
def switch_minimizer(self, minimizer_enum: Union[AvailableMinimizers, str]) -> None:
    """Switch minimizer and initialize.

    :param minimizer_enum: The enum of the minimizer to create and
        instantiate.
    """
    if isinstance(minimizer_enum, str):
        print(f'minimizer should be set with enum {minimizer_enum}')
        minimizer_enum = from_string_to_enum(minimizer_enum)

    self._update_minimizer(minimizer_enum)

tolerance property writable

Get the tolerance for the minimizer.

:return: Tolerance for the minimizer

minimizers

bumps_utils

BumpsProgressMonitor

Bases: Monitor

BUMPS :class:Monitor that forwards per-step progress information to a user-supplied callback.

The monitor delegates payload construction to payload_builder so the BUMPS minimizer can keep all backend-specific payload semantics in one place.

Source code in src/easyscience/fitting/minimizers/bumps_utils/progress_monitor.py
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
class BumpsProgressMonitor(Monitor):
    """BUMPS :class:`Monitor` that relays per-step fit progress to a
    user-supplied callback.

    Payload construction is delegated to ``payload_builder``, keeping
    all backend-specific payload semantics inside the BUMPS minimizer.
    """

    def __init__(self, problem, callback, payload_builder):
        self._payload_builder = payload_builder
        self._callback = callback
        self._problem = problem

    def config_history(self, history):
        # Ask BUMPS to record step, point and value on every iteration.
        history.requires(step=1, point=1, value=1)

    def __call__(self, history):
        # Pull the most recent entries from the BUMPS history buffers.
        step = int(history.step[0])
        point = np.asarray(history.point[0])
        nllf = float(history.value[0])
        self._callback(
            self._payload_builder(
                problem=self._problem,
                iteration=step,
                point=point,
                nllf=nllf,
            )
        )

EvalCounter

Wrap a callable so the number of invocations is recorded on count.

Used by the BUMPS minimizer to count objective-function evaluations for cross-backend consistency with LMFit (nfev) and DFO-LS (nf).

Source code in src/easyscience/fitting/minimizers/bumps_utils/eval_counter.py
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
class EvalCounter:
    """Callable proxy that records how many times the wrapped function
    has been invoked, via the ``count`` attribute.

    The BUMPS minimizer uses this to count objective-function
    evaluations, keeping parity with LMFit (``nfev``) and DFO-LS
    (``nf``).
    """

    def __init__(self, fn: Callable):
        self.count = 0  # number of completed-or-attempted invocations
        self._fn = fn
        # Keep introspection working: expose the wrapped callable's name
        # (falling back to this class's name) and its signature.
        self.__name__ = getattr(fn, '__name__', self.__class__.__name__)
        self.__signature__ = inspect.signature(fn)
        functools.update_wrapper(self, fn)

    def __call__(self, *args, **kwargs):
        # Increment before delegating so a raising call is still counted.
        self.count += 1
        return self._fn(*args, **kwargs)

eval_counter

EvalCounter

Wrap a callable so the number of invocations is recorded on count.

Used by the BUMPS minimizer to count objective-function evaluations for cross-backend consistency with LMFit (nfev) and DFO-LS (nf).

Source code in src/easyscience/fitting/minimizers/bumps_utils/eval_counter.py
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
class EvalCounter:
    """Record the number of invocations of a wrapped callable.

    The tally lives on the public ``count`` attribute. The BUMPS
    minimizer relies on it to report objective-function evaluation
    counts in line with LMFit (``nfev``) and DFO-LS (``nf``).
    """

    def __init__(self, fn: Callable):
        self._target = fn
        self.count = 0
        # Keep the wrapper introspectable: expose the wrapped function's
        # name and signature, then copy the remaining wrapper metadata.
        fallback = self.__class__.__name__
        self.__name__ = getattr(fn, '__name__', fallback)
        self.__signature__ = inspect.signature(fn)
        functools.update_wrapper(self, fn)

    def __call__(self, *args, **kwargs):
        self.count += 1
        return self._target(*args, **kwargs)

progress_monitor

BumpsProgressMonitor

Bases: Monitor

BUMPS :class:Monitor that forwards per-step progress information to a user-supplied callback.

The monitor delegates payload construction to payload_builder so the BUMPS minimizer can keep all backend-specific payload semantics in one place.

Source code in src/easyscience/fitting/minimizers/bumps_utils/progress_monitor.py
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
class BumpsProgressMonitor(Monitor):
    """BUMPS :class:`Monitor` that relays per-step progress information
    to a caller-provided callback.

    Payload construction is delegated to ``payload_builder``, which
    keeps every backend-specific payload detail inside the BUMPS
    minimizer.
    """

    def __init__(self, problem, callback, payload_builder):
        self._problem = problem
        self._callback = callback
        self._payload_builder = payload_builder

    def config_history(self, history):
        # Retain the most recent step index, point and value.
        history.requires(step=1, point=1, value=1)

    def __call__(self, history):
        step = int(history.step[0])
        latest_point = np.asarray(history.point[0])
        nllf = float(history.value[0])
        self._callback(
            self._payload_builder(
                problem=self._problem,
                iteration=step,
                point=latest_point,
                nllf=nllf,
            )
        )

minimizer_base

MinimizerBase

This template class is the basis for all minimizer engines in EasyScience.

Source code in src/easyscience/fitting/minimizers/minimizer_base.py
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
class MinimizerBase(metaclass=ABCMeta):
    """This template class is the basis for all minimizer engines in
    `EasyScience`.

    Concrete engines implement the abstract methods below and share the
    parameter-caching helpers so pre-fit values can be restored after a
    failed fit.
    """

    # Name of the third-party package backing the concrete engine.
    package: str = None

    def __init__(
        self,
        obj,  #: ObjBase,
        fit_function: Callable,
        minimizer_enum: AvailableMinimizers,
    ):  # todo after constraint changes, add type hint: obj: ObjBase  # noqa: E501
        if minimizer_enum.method not in self.supported_methods():
            raise FitError(f'Method {minimizer_enum.method} not available in {self.__class__}')
        self._object = obj
        self._original_fit_function = fit_function
        self._minimizer_enum = minimizer_enum
        self._method = minimizer_enum.method
        # Fit parameters keyed by unique name; filled by _generate_fit_function.
        self._cached_pars: Dict[str, Parameter] = {}
        # Pre-fit (value, error) pairs so parameters can be restored on failure.
        self._cached_pars_vals: Dict[str, Tuple[float, float]] = {}
        self._cached_model = None
        self._fit_function = None

    @property
    def enum(self) -> AvailableMinimizers:
        """Enum member identifying this minimizer."""
        return self._minimizer_enum

    @property
    def name(self) -> str:
        """Name of the minimizer enum member."""
        return self._minimizer_enum.name

    def _restore_parameter_values(self) -> None:
        """Reset every cached parameter to its pre-fit value and error."""
        for key in self._cached_pars.keys():
            self._cached_pars[key].value = self._cached_pars_vals[key][0]
            self._cached_pars[key].error = self._cached_pars_vals[key][1]

    @abstractmethod
    def fit(
        self,
        x: np.ndarray,
        y: np.ndarray,
        weights: np.ndarray,
        model: Callable | None = None,
        parameters: List[Parameter] | None = None,
        method: str | None = None,
        tolerance: float | None = None,
        max_evaluations: int | None = None,
        progress_callback: Callable[[dict], bool | None] | None = None,
        **kwargs,
    ) -> FitResults:
        """Perform a fit using the engine.

        :param x: points to be calculated at
        :type x: np.ndarray
        :param y: measured points
        :type y: np.ndarray
        :param weights: Weights for supplied measured points
        :type weights: np.ndarray
        :param model: Optional Model which is being fitted to
        :param parameters: Optional parameters for the fit
        :param method: method for the minimizer to use.
        :type method: str
        :param tolerance: Optional convergence tolerance forwarded to the engine.
        :param max_evaluations: Optional evaluation/step budget forwarded to the engine.
        :param progress_callback: Optional callable receiving per-step progress payloads.
        :param kwargs: Additional arguments for the fitting function.
        :return: Fit results
        """

    def evaluate(
        self, x: np.ndarray, minimizer_parameters: dict[str, float] | None = None, **kwargs
    ) -> np.ndarray:
        """Evaluate the fit function for values of x. Parameters used
        are either the latest or user supplied. If the parameters are
        user supplied, it must be in a dictionary of {'parameter_name':
        parameter_value,...}.

        :param x: x values for which the fit function will be evaluated
        :type x:  np.ndarray
        :param minimizer_parameters: Dictionary of parameters which will be used in the fit function. They must be in a dictionary
         of {'parameter_name': parameter_value,...}
        :type minimizer_parameters: dict
        :param kwargs: additional arguments
        :return: y values calculated at points x for a set of parameters.
        :rtype: np.ndarray
        """  # noqa: E501
        if minimizer_parameters is None:
            minimizer_parameters = {}
        if not isinstance(minimizer_parameters, dict):
            raise TypeError('minimizer_parameters must be a dictionary')

        if self._fit_function is None:
            # This will also generate self._cached_pars
            self._fit_function = self._generate_fit_function()

        # Fill in any parameter the caller did not supply with its cached value.
        minimizer_parameters = self._prepare_parameters(minimizer_parameters)

        return self._fit_function(x, **minimizer_parameters, **kwargs)

    def _get_method_kwargs(self, passed_method: str | None = None) -> dict[str, str]:
        """Return a ``{'method': ...}`` kwarg dict, validating an explicit method.

        :param passed_method: Method requested by the caller, if any.
        :raises FitError: If ``passed_method`` is not supported by this engine.
        """
        if passed_method is not None:
            if passed_method not in self.supported_methods():
                raise FitError(f'Method {passed_method} not available in {self.__class__}')
            return {'method': passed_method}

        if self._method is not None:
            return {'method': self._method}

        return {}

    @abstractmethod
    def convert_to_pars_obj(self, par_list: List[Parameter] | None = None):
        """Create an engine compatible container with the `Parameters`
        converted from the base object.

        :param par_list: If only a single/selection of parameter is
            required. Specify as a list
        :type par_list: List[str]
        :return: engine Parameters compatible object
        """

    @staticmethod
    @abstractmethod
    def supported_methods() -> List[str]:
        """Return a list of supported methods for the minimizer.

        :return: List of supported methods
        :rtype: List[str]
        """

    @staticmethod
    @abstractmethod
    def all_methods() -> List[str]:
        """Return a list of all available methods for the minimizer.

        :return: List of all available methods
        :rtype: List[str]
        """

    @staticmethod
    @abstractmethod
    def convert_to_par_object(obj):  # todo after constraint changes, add type hint: obj: ObjBase
        """Convert an `EasyScience.variable.Parameter` object to an
        engine Parameter object.
        """

    def _prepare_parameters(self, parameters: dict[str, float]) -> dict[str, float]:
        """Prepare the parameters for the minimizer.

        Missing entries are filled in from the cached parameter values.

        :param parameters: Dict of parameters for the minimizer with
            names as keys.
        """
        pars = self._cached_pars

        for name, item in pars.items():
            parameter_name = MINIMIZER_PARAMETER_PREFIX + str(name)
            if parameter_name not in parameters.keys():
                parameters[parameter_name] = item.value
        return parameters

    def _generate_fit_function(self) -> Callable:
        """Using the user supplied `fit_function`, wrap it in such a way
        we can update `Parameter` on iterations.

        :return: a fit function which is compatible with bumps models
        """
        # Original fit function
        func = self._original_fit_function
        # Get a list of `Parameters`
        self._cached_pars = {}
        self._cached_pars_vals = {}
        for parameter in self._object.get_fit_parameters():
            key = parameter.unique_name
            self._cached_pars[key] = parameter
            self._cached_pars_vals[key] = (parameter.value, parameter.error)

        # Make a new fit function
        def _fit_function(x: np.ndarray, **kwargs):
            """Wrapped fit function which now has an EasyScience
            compatible form.

            :param x: array of data points to be calculated
            :type x: np.ndarray
            :param kwargs: key word arguments
            :return: points calculated at `x`
            :rtype: np.ndarray
            """
            # Update the `Parameter` values and the callback if needed
            # TODO THIS IS NOT THREAD SAFE :-(

            for name, value in kwargs.items():
                # Strip the engine prefix explicitly rather than assuming it is
                # a single character, consistent with the rest of the code base.
                par_name = name[len(MINIMIZER_PARAMETER_PREFIX) :]
                if par_name in self._cached_pars.keys():
                    # This will take into account constraints
                    if self._cached_pars[par_name].value != value:
                        self._cached_pars[par_name].value = value

                    # Since we are calling the parameter fset will be called.
            # TODO Pre processing here
            return_data = func(x)
            # TODO Loading or manipulating data here
            return return_data

        _fit_function.__signature__ = self._create_signature(self._cached_pars)
        return _fit_function

    @staticmethod
    def _create_signature(parameters: Dict[str, Parameter]) -> Signature:
        """Wrap the function signature.

        This is done as lmfit wants the function to be in the form:
        f = (x, a=1, b=2)...
        Where we need to be generic. Note that this won't hold for much outside of this scope.
        """
        wrapped_parameters = []
        wrapped_parameters.append(
            InspectParameter('x', InspectParameter.POSITIONAL_OR_KEYWORD, annotation=_empty)
        )

        for name, parameter in parameters.items():
            default_value = parameter.value

            wrapped_parameters.append(
                InspectParameter(
                    MINIMIZER_PARAMETER_PREFIX + str(name),
                    InspectParameter.POSITIONAL_OR_KEYWORD,
                    annotation=_empty,
                    default=default_value,
                )
            )
        return Signature(wrapped_parameters)

    @staticmethod
    def _error_from_jacobian(
        jacobian: np.ndarray, residuals: np.ndarray, confidence: float = 0.95
    ) -> np.ndarray:
        """Estimate parameter errors from the Jacobian and residuals.

        :param jacobian: Jacobian of the model at the solution
        :param residuals: residuals at the solution
        :param confidence: two-sided confidence level for the error scaling
        :return: scaled error matrix
        """
        from scipy import stats

        JtJi = np.linalg.inv(np.dot(jacobian.T, jacobian))
        # 1.96 is a 95% confidence value
        error_matrix = np.dot(
            JtJi,
            np.dot(jacobian.T, np.dot(np.diag(residuals**2), np.dot(jacobian, JtJi))),
        )

        z = 1 - ((1 - confidence) / 2)
        # Convert the two-sided confidence level to a critical z-score via the
        # inverse CDF (ppf): ppf(0.975) ~= 1.96 for 95% confidence. The previous
        # use of `pdf` returned the density (~0.25), not the critical value.
        z = stats.norm.ppf(z)
        error_matrix = z * np.sqrt(error_matrix)
        return error_matrix
all_methods() abstractmethod staticmethod

Return a list of all available methods for the minimizer.

:return: List of all available methods :rtype: List[str]

Source code in src/easyscience/fitting/minimizers/minimizer_base.py
156
157
158
159
160
161
162
163
@staticmethod
@abstractmethod
def all_methods() -> List[str]:
    """Return a list of all available methods for the minimizer.

    :return: List of all available methods
    :rtype: List[str]
    """
convert_to_par_object(obj) abstractmethod staticmethod

Convert an EasyScience.variable.Parameter object to an engine Parameter object.

Source code in src/easyscience/fitting/minimizers/minimizer_base.py
165
166
167
168
169
170
@staticmethod
@abstractmethod
def convert_to_par_object(obj):  # todo after constraint changes, add type hint: obj: ObjBase
    """Convert an `EasyScience.variable.Parameter` object to an
    engine Parameter object.
    """
convert_to_pars_obj(par_list=None) abstractmethod

Create an engine compatible container with the Parameters converted from the base object.

:param par_list: If only a single/selection of parameter is required. Specify as a list :type par_list: List[str] :return: engine Parameters compatible object

Source code in src/easyscience/fitting/minimizers/minimizer_base.py
136
137
138
139
140
141
142
143
144
145
@abstractmethod
def convert_to_pars_obj(self, par_list: List[Parameter] | None = None):
    """Create an engine compatible container with the `Parameters`
    converted from the base object.

    :param par_list: If only a single/selection of parameter is
        required. Specify as a list
    :type par_list: List[str]
    :return: engine Parameters compatible object
    """
evaluate(x, minimizer_parameters=None, **kwargs)

Evaluate the fit function for values of x. Parameters used are either the latest or user supplied. If the parameters are user supplied, it must be in a dictionary of {'parameter_name': parameter_value,...}.

:param x: x values for which the fit function will be evaluated :type x: np.ndarray :param minimizer_parameters: Dictionary of parameters which will be used in the fit function. They must be in a dictionary of {'parameter_name': parameter_value,...} :type minimizer_parameters: dict :param kwargs: additional arguments :return: y values calculated at points x for a set of parameters. :rtype: np.ndarray

Source code in src/easyscience/fitting/minimizers/minimizer_base.py
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
def evaluate(
    self, x: np.ndarray, minimizer_parameters: dict[str, float] | None = None, **kwargs
) -> np.ndarray:
    """Evaluate the fit function for values of x. Parameters used
    are either the latest or user supplied. If the parameters are
    user supplied, it must be in a dictionary of {'parameter_name':
    parameter_value,...}.

    :param x: x values for which the fit function will be evaluated
    :type x:  np.ndarray
    :param minimizer_parameters: Dictionary of parameters which will be used in the fit function. They must be in a dictionary
     of {'parameter_name': parameter_value,...}
    :type minimizer_parameters: dict
    :param kwargs: additional arguments
    :return: y values calculated at points x for a set of parameters.
    :rtype: np.ndarray
    """  # noqa: E501
    if minimizer_parameters is None:
        minimizer_parameters = {}
    if not isinstance(minimizer_parameters, dict):
        raise TypeError('minimizer_parameters must be a dictionary')

    if self._fit_function is None:
        # This will also generate self._cached_pars
        self._fit_function = self._generate_fit_function()

    minimizer_parameters = self._prepare_parameters(minimizer_parameters)

    return self._fit_function(x, **minimizer_parameters, **kwargs)
fit(x, y, weights, model=None, parameters=None, method=None, tolerance=None, max_evaluations=None, progress_callback=None, **kwargs) abstractmethod

Perform a fit using the engine.

:param x: points to be calculated at :type x: np.ndarray :param y: measured points :type y: np.ndarray :param weights: Weights for supplied measured points :type weights: np.ndarray :param model: Optional Model which is being fitted to :param parameters: Optional parameters for the fit :param method: method for the minimizer to use. :type method: str :param kwargs: Additional arguments for the fitting function. :return: Fit results

Source code in src/easyscience/fitting/minimizers/minimizer_base.py
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
@abstractmethod
def fit(
    self,
    x: np.ndarray,
    y: np.ndarray,
    weights: np.ndarray,
    model: Callable | None = None,
    parameters: List[Parameter] | None = None,
    method: str | None = None,
    tolerance: float | None = None,
    max_evaluations: int | None = None,
    progress_callback: Callable[[dict], bool | None] | None = None,
    **kwargs,
) -> FitResults:
    """Perform a fit using the  engine.

    :param x: points to be calculated at
    :type x: np.ndarray
    :param y: measured points
    :type y: np.ndarray
    :param weights: Weights for supplied measured points
    :type weights: np.ndarray
    :param model: Optional Model which is being fitted to
    :param parameters: Optional parameters for the fit
    :param method: method for the minimizer to use.
    :type method: str
    :param kwargs: Additional arguments for the fitting function.
    :return: Fit results
    """
supported_methods() abstractmethod staticmethod

Return a list of supported methods for the minimizer.

:return: List of supported methods :rtype: List[str]

Source code in src/easyscience/fitting/minimizers/minimizer_base.py
147
148
149
150
151
152
153
154
@staticmethod
@abstractmethod
def supported_methods() -> List[str]:
    """Return a list of supported methods for the minimizer.

    :return: List of supported methods
    :rtype: List[str]
    """

minimizer_bumps

Bumps

Bases: MinimizerBase

This is a wrapper to Bumps: https://bumps.readthedocs.io/ It allows for the Bumps fitting engine to use parameters declared in an EasyScience.base_classes.ObjBase.

Source code in src/easyscience/fitting/minimizers/minimizer_bumps.py
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
class Bumps(MinimizerBase):
    """
    This is a wrapper to Bumps: https://bumps.readthedocs.io/
    It allows for the Bumps fitting engine to use parameters declared in an `EasyScience.base_classes.ObjBase`.
    """

    package = 'bumps'

    def __init__(
        self,
        obj,  #: ObjBase,
        fit_function: Callable,
        minimizer_enum: AvailableMinimizers | None = None,
    ):  # todo after constraint changes, add type hint: obj: ObjBase  # noqa: E501
        """Set up the BUMPS engine around a `ObjBase` and an arbitrary
        fitting function.

        :param obj: Object containing elements of the `Parameter` class
        :type obj: ObjBase
        :param fit_function: function that when called returns y values. 'x' must be the first
                            and only positional argument. Additional values can be supplied by
                            keyword/value pairs
        :type fit_function: Callable
        """
        super().__init__(obj=obj, fit_function=fit_function, minimizer_enum=minimizer_enum)
        # Starting parameter values, keyed by their BUMPS-prefixed names.
        self._p_0 = {}
        # Wrapper installed around the fit function to count evaluations.
        self._eval_counter: EvalCounter | None = None

    @staticmethod
    def all_methods() -> List[str]:
        """Return the full list of BUMPS fitter ids (``FIT_AVAILABLE_IDS_FILTERED``)."""
        return FIT_AVAILABLE_IDS_FILTERED

    @staticmethod
    def supported_methods() -> List[str]:
        # only a small subset
        methods = ['amoeba', 'newton', 'lm']
        return methods

    def fit(
        self,
        x: np.ndarray,
        y: np.ndarray,
        weights: np.ndarray,
        model: Callable | None = None,
        parameters: List[Parameter] | None = None,
        method: str | None = None,
        tolerance: float | None = None,
        max_evaluations: int | None = None,
        progress_callback: Callable[[dict], bool | None] | None = None,
        minimizer_kwargs: dict | None = None,
        engine_kwargs: dict | None = None,
        **kwargs,
    ) -> FitResults:
        """Perform a fit using the BUMPS engine.

        :param x: points to be calculated at
        :type x: np.ndarray
        :param y: measured points
        :type y: np.ndarray
        :param weights: Weights for supplied measured points
        :type weights: np.ndarray
        :param model: Optional Model which is being fitted to
        :param parameters: Optional parameters for the fit
        :type parameters: List[BumpsParameter]
        :param method: Method for minimization
        :type method: str
        :param max_evaluations: Maximum number of optimizer steps. Forwarded to BUMPS as
            its ``steps`` parameter. If ``None``, the default value defined by the
            selected BUMPS fitter (``fitclass.settings``) is used.
        :type max_evaluations: int | None
        :param progress_callback: Optional callback for progress updates. The payload
            field ``iteration`` carries the BUMPS optimizer step index.
        :type progress_callback: Callable

        .. note::
            The :class:`FitResults` field ``n_evaluations`` reports the number of
            **objective-function evaluations** consumed by the fit, for cross-backend
            consistency with LMFit (``nfev``) and DFO-LS (``nf``). For BUMPS this is
            distinct from the optimizer **step count** that ``max_evaluations`` (i.e.
            BUMPS ``steps``) is budgeted against and returned as
            :class:`FitResults.iterations`; a single step may trigger several objective
            evaluations, so ``n_evaluations`` can legitimately exceed
            ``max_evaluations``. The budget-exhaustion check is performed against
            ``iterations``, not ``n_evaluations``.

        :return: Fit results
        :rtype: FitResults
        """
        method_dict = self._get_method_kwargs(method)

        x, y, weights = np.asarray(x), np.asarray(y), np.asarray(weights)

        if y.shape != x.shape:
            raise ValueError('x and y must have the same shape.')

        if weights.shape != x.shape:
            raise ValueError('Weights must have the same shape as x and y.')

        if not np.isfinite(weights).all():
            raise ValueError('Weights cannot be NaN or infinite.')

        if (weights <= 0).any():
            raise ValueError('Weights must be strictly positive and non-zero.')

        if engine_kwargs is None:
            engine_kwargs = {}

        if minimizer_kwargs is None:
            minimizer_kwargs = {}
        # Merge into a fresh dict so the caller's `minimizer_kwargs` is never
        # mutated; `engine_kwargs` entries win on key clashes, as before.
        minimizer_kwargs = {**minimizer_kwargs, **engine_kwargs}

        method_str = method_dict.get('method', self._method)
        fitclass = self._resolve_fitclass(method_str)

        # Resolve BUMPS-native defaults so the budget reported back to the caller (and
        # used by the budget-exhaustion check in `_gen_fit_results`) reflects the values
        # actually consumed by the fitter, even when the caller passes None.
        fitter_settings = dict(fitclass.settings)
        if max_evaluations is None:
            max_evaluations = fitter_settings.get('steps')
        if tolerance is None:
            ftol = fitter_settings.get('ftol')
            xtol = fitter_settings.get('xtol')
            tols = [t for t in (ftol, xtol) if t is not None]
            tolerance = min(tols) if tols else None

        if tolerance is not None:
            minimizer_kwargs['ftol'] = tolerance  # tolerance for change in function value
            minimizer_kwargs['xtol'] = (
                tolerance  # tolerance for change in parameter value, could be an independent value
            )
        if max_evaluations is not None:
            minimizer_kwargs['steps'] = max_evaluations

        if model is None:
            model_function = self._make_model(parameters=parameters)
            model = model_function(x, y, weights)
        self._cached_model = model

        self._p_0 = {f'p{key}': self._cached_pars[key].value for key in self._cached_pars.keys()}

        problem = FitProblem(model)

        monitors = []
        if progress_callback is not None:
            if not callable(progress_callback):
                raise ValueError('progress_callback must be callable')
            monitors.append(
                BumpsProgressMonitor(problem, progress_callback, self._build_progress_payload)
            )

        driver = FitDriver(
            fitclass=fitclass,
            problem=problem,
            monitors=monitors,
            **minimizer_kwargs,
            **kwargs,
        )
        driver.clip()

        # Why do we do this? Because a fitting template has to have global_object instantiated outside pre-runtime
        from easyscience import global_object

        stack_status = global_object.stack.enabled
        global_object.stack.enabled = False

        try:
            # Drive the fit through the local FitDriver instance so the supplied
            # `monitors` (including the optional progress callback monitor) are
            # invoked. `bumps.fitters.fit` constructs its own driver.
            # Use a dedicated name for the optimum so the input `x` is not shadowed.
            best_x, fx = driver.fit()
            from scipy.optimize import OptimizeResult

            # BUMPS' `MonitorRunner.history.step` is populated by the driver itself
            # (independently of any user-supplied monitors) and exposes the canonical
            # last-step index reached by the fitter, so we use it as `nit`.
            history_step = getattr(getattr(driver, 'monitor_runner', None), 'history', None)
            nit_value = int(history_step.step[0]) if history_step is not None else None
            model_results = OptimizeResult(
                x=best_x,
                dx=driver.stderr(),
                fun=fx,
                success=True,
                status=0,
                message='successful termination',
                nit=nit_value,
            )
            model_results.state = driver.fitter.state
            self._set_parameter_fit_result(model_results, stack_status, problem._parameters)
            results = self._gen_fit_results(
                model_results,
                max_evaluations=max_evaluations,
                tolerance=tolerance,
            )
        except Exception as e:
            self._restore_parameter_values()
            # Chain explicitly so the original traceback is preserved as the cause.
            raise FitError(e) from e
        finally:
            global_object.stack.enabled = stack_status
        return results

    @staticmethod
    def _resolve_fitclass(method: str):
        """Look up the BUMPS fitter class whose ``id`` matches *method*.

        :raises FitError: If no registered fitter carries that id.
        """
        found = next((fitclass for fitclass in FITTERS if fitclass.id == method), None)
        if found is None:
            raise FitError(f'Unknown BUMPS fitting method: {method}')
        return found

    def _build_progress_payload(
        self, problem, iteration: int, point: np.ndarray, nllf: float
    ) -> dict:
        """Assemble the progress-callback payload for one optimizer step."""
        # Reuse the nllf already computed by the fitter instead of
        # re-evaluating the model; BUMPS applies its own chisq scaling.
        chi2 = float(problem.chisq(nllf=nllf, norm=False))
        reduced_chi2 = float(problem.chisq(nllf=nllf, norm=True))

        payload = {
            'iteration': iteration,
            'chi2': chi2,
            'reduced_chi2': reduced_chi2,
            'parameter_values': self._current_parameter_snapshot(problem, point),
            'refresh_plots': False,
            'finished': False,
        }
        return payload

    def _current_parameter_snapshot(self, problem, point: np.ndarray) -> dict:
        """Map bare parameter names (prefix stripped) to current float values."""
        labels = problem.labels()
        if point is None:
            # No explicit point supplied: read the problem's current values.
            values = problem.getp()
        else:
            values = point
        prefix_len = len(MINIMIZER_PARAMETER_PREFIX)
        return {label[prefix_len:]: float(value) for label, value in zip(labels, values)}

    def convert_to_pars_obj(self, par_list: List[Parameter] | None = None) -> List[BumpsParameter]:
        """Create a container with the `Parameters` converted from the
        base object.

        :param par_list: If only a single/selection of parameter is
            required. Specify as a list
        :type par_list: List[str]
        :return: bumps Parameters list
        :rtype: List[BumpsParameter]
        """
        source = par_list
        if source is None:
            # No explicit selection: take every fit parameter from the object.
            source = self._object.get_fit_parameters()
        return [type(self).convert_to_par_object(item) for item in source]

    @staticmethod
    def convert_to_par_object(obj) -> BumpsParameter:
        """Convert an `EasyScience.variable.Parameter` object to a bumps
        Parameter object.

        :return: bumps Parameter compatible object.
        :rtype: BumpsParameter
        """
        value = obj.value
        bounds = [obj.min, obj.max]
        return BumpsParameter(
            name=MINIMIZER_PARAMETER_PREFIX + obj.unique_name,
            value=value,
            bounds=bounds,
            fixed=obj.fixed,
        )

    def _make_model(self, parameters: List[BumpsParameter] | None = None) -> Callable:
        """Generate a bumps model from the supplied `fit_function` and
        parameters in the base object. Note that this makes a callable
        as it needs to be initialized with *x*, *y*, *weights*

        Weights are converted to dy (standard deviation of y).

        :return: Callable to make a bumps Curve model
        :rtype: Callable
        """
        # Wrap the fit function so objective evaluations are counted.
        counted_fit_func = EvalCounter(self._generate_fit_function())
        self._eval_counter = counted_fit_func

        def _make_func(x, y, weights):
            bumps_pars = {}
            if parameters:
                for par in parameters:
                    key = MINIMIZER_PARAMETER_PREFIX + par.unique_name
                    bumps_pars[key] = self.convert_to_par_object(par)
            else:
                for name, par in self._cached_pars.items():
                    key = MINIMIZER_PARAMETER_PREFIX + str(name)
                    bumps_pars[key] = self.convert_to_par_object(par)
            # `dy` is passed as the reciprocal of the supplied weights.
            return Curve(counted_fit_func, x, y, dy=1 / weights, **bumps_pars)

        return _make_func

    def _set_parameter_fit_result(
        self,
        fit_result,
        stack_status: bool,
        par_list: List[BumpsParameter],
    ):
        """Update parameters to their final values and assign a std
        error to them.

        :param fit_result: BUMPS OptimizeResult containing best-fit
            values and errors
        :param stack_status: Whether the undo stack was enabled
        :param par_list: List of BUMPS parameter objects
        """
        from easyscience import global_object

        cached = self._cached_pars
        best_values = np.asarray(fit_result.x)
        errors = np.asarray(fit_result.dx)

        if stack_status:
            # Restore starting values first so the macro records the full change.
            self._restore_parameter_values()
            global_object.stack.enabled = True
            global_object.stack.beginMacro('Fitting routine')

        prefix_len = len(MINIMIZER_PARAMETER_PREFIX)
        for idx, bumps_par in enumerate(par_list):
            key = bumps_par.name[prefix_len:]
            cached[key].value = best_values[idx]
            cached[key].error = errors[idx]

        if stack_status:
            global_object.stack.endMacro()

    def _gen_fit_results(
        self,
        fit_results,
        max_evaluations: int | None = None,
        tolerance: float | None = None,
        **kwargs,
    ) -> FitResults:
        """Convert fit results into the unified `FitResults` format.

        :param fit_results: OptimizeResult produced by the BUMPS fit, carrying
            the best-fit vector ``x``, ``success`` and the last optimizer step
            index ``nit``
        :param max_evaluations: Maximum optimizer-step budget (BUMPS ``steps``),
            if one was configured
        :param tolerance: Requested convergence tolerance; used only to phrase
            the non-convergence warning
        :param kwargs: Extra attributes to copy onto the result container
        :return: fit results container
        :rtype: FitResults
        """
        results = FitResults()

        for name, value in kwargs.items():
            # NOTE(review): getattr(..., False) also skips attributes whose
            # current value is falsy (0, '', None) — confirm this is intended.
            if getattr(results, name, False):
                setattr(results, name, value)

        n_evaluations = None if self._eval_counter is None else self._eval_counter.count
        # BUMPS exposes `nit` as the last reported optimizer step index rather than the
        # total number of objective calls. We keep `n_evaluations` as objective-call
        # count for cross-backend consistency with LMFit (`nfev`) and DFO-LS (`nf`).
        n_iterations = getattr(fit_results, 'nit', None)
        # Convert the zero-based step index into the number of optimizer steps that have
        # actually been consumed against the configured BUMPS `steps` budget.
        n_steps_used = None if n_iterations is None else n_iterations + 1
        stopped_on_budget = max_evaluations is not None and (
            # For BUMPS, `max_evaluations` is forwarded as `steps`, so budget
            # exhaustion must be checked against consumed optimizer steps, not raw
            # objective evaluations, which can legitimately exceed the step budget.
            (n_steps_used is not None and n_steps_used >= max_evaluations)
            or (
                n_iterations is None
                and n_evaluations is not None
                and n_evaluations >= max_evaluations
            )
        )

        results.success = fit_results.success and not stopped_on_budget
        pars = self._cached_pars
        # Map bumps parameter labels back to the cached EasyScience parameters.
        prefix_len = len(MINIMIZER_PARAMETER_PREFIX)
        results.p0 = self._p_0
        results.p = {
            name: pars[name[prefix_len:]].value for name in self._cached_model.pars.keys()
        }
        results.x = self._cached_model.x
        results.y_obs = self._cached_model.y
        results.y_calc = self.evaluate(results.x, minimizer_parameters=results.p)
        results.y_err = self._cached_model.dy
        results.n_evaluations = n_evaluations
        results.iterations = n_steps_used
        results.message = ''
        if stopped_on_budget:
            results.message = (
                f'Fit stopped: reached maximum optimizer steps ({max_evaluations}); '
                f'objective evaluated {n_evaluations} times'
            )
            if tolerance is None:
                warnings.warn(
                    f'Fit did not converge within the maximum optimizer steps of {max_evaluations} '
                    f'({n_evaluations} objective evaluations). '
                    'Consider increasing the maximum number of evaluations or adjusting the tolerance.',
                    UserWarning,
                )
            else:
                warnings.warn(
                    f'Fit did not reach the desired tolerance of {tolerance} within the maximum optimizer steps of {max_evaluations} '
                    f'({n_evaluations} objective evaluations). '
                    'Consider increasing the maximum number of evaluations or adjusting the tolerance.',
                    UserWarning,
                )

        results.minimizer_engine = self.__class__
        results.fit_args = None
        results.engine_result = fit_results
        return results
__init__(obj, fit_function, minimizer_enum=None)

Initialize the fitting engine with a ObjBase and an arbitrary fitting function.

:param obj: Object containing elements of the Parameter class :type obj: ObjBase :param fit_function: function that when called returns y values. 'x' must be the first and only positional argument. Additional values can be supplied by keyword/value pairs :type fit_function: Callable

Source code in src/easyscience/fitting/minimizers/minimizer_bumps.py
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
def __init__(
    self,
    obj,  #: ObjBase,
    fit_function: Callable,
    minimizer_enum: AvailableMinimizers | None = None,
):  # todo after constraint changes, add type hint: obj: ObjBase  # noqa: E501
    """Set up the BUMPS fitting engine.

    Delegates common setup to the base minimizer, then prepares the
    per-fit bookkeeping attributes.

    :param obj: Object containing elements of the `Parameter` class
    :type obj: ObjBase
    :param fit_function: function that when called returns y values. 'x' must be the first
                        and only positional argument. Additional values can be supplied by
                        keyword/value pairs
    :type fit_function: Callable
    """
    super().__init__(obj=obj, fit_function=fit_function, minimizer_enum=minimizer_enum)
    # Snapshot of parameter start values, refreshed at the start of each fit.
    self._p_0 = {}
    # Objective-evaluation counter; installed by `_make_model`.
    self._eval_counter: EvalCounter | None = None
convert_to_par_object(obj) staticmethod

Convert an EasyScience.variable.Parameter object to a bumps Parameter object.

:return: bumps Parameter compatible object. :rtype: BumpsParameter

Source code in src/easyscience/fitting/minimizers/minimizer_bumps.py
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
@staticmethod
def convert_to_par_object(obj) -> BumpsParameter:
    """Translate an `EasyScience.variable.Parameter` into the
    equivalent bumps parameter.

    :return: bumps Parameter compatible object.
    :rtype: BumpsParameter
    """
    label = MINIMIZER_PARAMETER_PREFIX + obj.unique_name
    return BumpsParameter(
        name=label,
        value=obj.value,
        bounds=[obj.min, obj.max],
        fixed=obj.fixed,
    )
convert_to_pars_obj(par_list=None)

Create a container with the Parameters converted from the base object.

:param par_list: If only a single/selection of parameter is required. Specify as a list :type par_list: List[str] :return: bumps Parameters list :rtype: List[BumpsParameter]

Source code in src/easyscience/fitting/minimizers/minimizer_bumps.py
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
def convert_to_pars_obj(self, par_list: List[Parameter] | None = None) -> List[BumpsParameter]:
    """Create a container with the `Parameters` converted from the
    base object.

    :param par_list: optional subset of parameters to convert; when
        omitted, all fit parameters of the wrapped object are used
    :type par_list: List[str]
    :return: bumps Parameters list
    :rtype: List[BumpsParameter]
    """
    source = self._object.get_fit_parameters() if par_list is None else par_list
    return [type(self).convert_to_par_object(entry) for entry in source]
fit(x, y, weights, model=None, parameters=None, method=None, tolerance=None, max_evaluations=None, progress_callback=None, minimizer_kwargs=None, engine_kwargs=None, **kwargs)

Perform a fit using the BUMPS engine.

:param x: points to be calculated at :type x: np.ndarray :param y: measured points :type y: np.ndarray :param weights: Weights for supplied measured points :type weights: np.ndarray :param model: Optional Model which is being fitted to :param parameters: Optional parameters for the fit :type parameters: List[BumpsParameter] :param method: Method for minimization :type method: str :param max_evaluations: Maximum number of optimizer steps. Forwarded to BUMPS as its steps parameter. If None, the default value defined by the selected BUMPS fitter (fitclass.settings) is used. :type max_evaluations: int | None :param progress_callback: Optional callback for progress updates. The payload field iteration carries the BUMPS optimizer step index. :type progress_callback: Callable

.. note:: The :class:FitResults field n_evaluations reports the number of objective-function evaluations consumed by the fit, for cross-backend consistency with LMFit (nfev) and DFO-LS (nf). For BUMPS this is distinct from the optimizer step count that max_evaluations (i.e. BUMPS steps) is budgeted against and returned as :class:FitResults.iterations; a single step may trigger several objective evaluations, so n_evaluations can legitimately exceed max_evaluations. The budget-exhaustion check is performed against iterations, not n_evaluations.

:return: Fit results :rtype: FitResults

Source code in src/easyscience/fitting/minimizers/minimizer_bumps.py
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
def fit(
    self,
    x: np.ndarray,
    y: np.ndarray,
    weights: np.ndarray,
    model: Callable | None = None,
    parameters: List[Parameter] | None = None,
    method: str | None = None,
    tolerance: float | None = None,
    max_evaluations: int | None = None,
    progress_callback: Callable[[dict], bool | None] | None = None,
    minimizer_kwargs: dict | None = None,
    engine_kwargs: dict | None = None,
    **kwargs,
) -> FitResults:
    """Perform a fit using the BUMPS engine.

    :param x: points to be calculated at
    :type x: np.ndarray
    :param y: measured points
    :type y: np.ndarray
    :param weights: Weights for supplied measured points
    :type weights: np.ndarray
    :param model: Optional Model which is being fitted to
    :param parameters: Optional parameters for the fit
    :type parameters: List[BumpsParameter]
    :param method: Method for minimization
    :type method: str
    :param tolerance: Convergence tolerance forwarded to BUMPS as both ``ftol``
        and ``xtol``. If ``None``, defaults are resolved from the selected
        fitter's settings.
    :type tolerance: float | None
    :param max_evaluations: Maximum number of optimizer steps. Forwarded to BUMPS as
        its ``steps`` parameter. If ``None``, the default value defined by the
        selected BUMPS fitter (``fitclass.settings``) is used.
    :type max_evaluations: int | None
    :param progress_callback: Optional callback for progress updates. The payload
        field ``iteration`` carries the BUMPS optimizer step index.
    :type progress_callback: Callable
    :param minimizer_kwargs: Extra keyword arguments passed through to the
        BUMPS ``FitDriver``.
    :type minimizer_kwargs: dict | None
    :param engine_kwargs: Additional driver kwargs merged into
        ``minimizer_kwargs`` (entries here win on key clashes).
    :type engine_kwargs: dict | None
    :param kwargs: Remaining keyword arguments forwarded to ``FitDriver``.

    .. note::
        The :class:`FitResults` field ``n_evaluations`` reports the number of
        **objective-function evaluations** consumed by the fit, for cross-backend
        consistency with LMFit (``nfev``) and DFO-LS (``nf``). For BUMPS this is
        distinct from the optimizer **step count** that ``max_evaluations`` (i.e.
        BUMPS ``steps``) is budgeted against and returned as
        :class:`FitResults.iterations`; a single step may trigger several objective
        evaluations, so ``n_evaluations`` can legitimately exceed
        ``max_evaluations``. The budget-exhaustion check is performed against
        ``iterations``, not ``n_evaluations``.

    :return: Fit results
    :rtype: FitResults
    """
    method_dict = self._get_method_kwargs(method)

    x, y, weights = np.asarray(x), np.asarray(y), np.asarray(weights)

    # Input validation: shapes must agree and weights must be usable as 1/sigma.
    if y.shape != x.shape:
        raise ValueError('x and y must have the same shape.')

    if weights.shape != x.shape:
        raise ValueError('Weights must have the same shape as x and y.')

    if not np.isfinite(weights).all():
        raise ValueError('Weights cannot be NaN or infinite.')

    if (weights <= 0).any():
        raise ValueError('Weights must be strictly positive and non-zero.')

    if engine_kwargs is None:
        engine_kwargs = {}

    if minimizer_kwargs is None:
        minimizer_kwargs = {}
    # Merge: entries in `engine_kwargs` override clashing keys in `minimizer_kwargs`.
    minimizer_kwargs.update(engine_kwargs)

    method_str = method_dict.get('method', self._method)
    fitclass = self._resolve_fitclass(method_str)

    # Resolve BUMPS-native defaults so the budget reported back to the caller (and
    # used by the budget-exhaustion check in `_gen_fit_results`) reflects the values
    # actually consumed by the fitter, even when the caller passes None.
    fitter_settings = dict(fitclass.settings)
    if max_evaluations is None:
        max_evaluations = fitter_settings.get('steps')
    if tolerance is None:
        ftol = fitter_settings.get('ftol')
        xtol = fitter_settings.get('xtol')
        tols = [t for t in (ftol, xtol) if t is not None]
        tolerance = min(tols) if tols else None

    if tolerance is not None:
        minimizer_kwargs['ftol'] = tolerance  # tolerance for change in function value
        minimizer_kwargs['xtol'] = (
            tolerance  # tolerance for change in parameter value, could be an independent value
        )
    if max_evaluations is not None:
        minimizer_kwargs['steps'] = max_evaluations

    if model is None:
        model_function = self._make_model(parameters=parameters)
        model = model_function(x, y, weights)
    self._cached_model = model

    # Snapshot starting values so the final report can include `p0`.
    self._p_0 = {f'p{key}': self._cached_pars[key].value for key in self._cached_pars.keys()}

    problem = FitProblem(model)

    monitors = []
    if progress_callback is not None:
        if not callable(progress_callback):
            raise ValueError('progress_callback must be callable')
        monitors.append(
            BumpsProgressMonitor(problem, progress_callback, self._build_progress_payload)
        )

    driver = FitDriver(
        fitclass=fitclass,
        problem=problem,
        monitors=monitors,
        **minimizer_kwargs,
        **kwargs,
    )
    driver.clip()

    # Why do we do this? Because a fitting template has to have global_object instantiated outside pre-runtime
    from easyscience import global_object

    # Disable the undo stack during the fit; `_set_parameter_fit_result`
    # re-enables it (when it was on) to record the final values as one macro.
    stack_status = global_object.stack.enabled
    global_object.stack.enabled = False

    try:
        # Drive the fit through the local FitDriver instance so the supplied
        # `monitors` (including the optional progress callback monitor) are
        # invoked. `bumps.fitters.fit` constructs its own driver.
        x, fx = driver.fit()
        from scipy.optimize import OptimizeResult

        # BUMPS' `MonitorRunner.history.step` is populated by the driver itself
        # (independently of any user-supplied monitors) and exposes the canonical
        # last-step index reached by the fitter, so we use it as `nit`.
        history_step = getattr(getattr(driver, 'monitor_runner', None), 'history', None)
        nit_value = int(history_step.step[0]) if history_step is not None else None
        model_results = OptimizeResult(
            x=x,
            dx=driver.stderr(),
            fun=fx,
            success=True,
            status=0,
            message='successful termination',
            nit=nit_value,
        )
        model_results.state = driver.fitter.state
        self._set_parameter_fit_result(model_results, stack_status, problem._parameters)
        results = self._gen_fit_results(
            model_results,
            max_evaluations=max_evaluations,
            tolerance=tolerance,
        )
    except Exception as e:
        # Any failure restores pre-fit parameter values before re-raising
        # wrapped as a FitError.
        self._restore_parameter_values()
        raise FitError(e)
    finally:
        global_object.stack.enabled = stack_status
    return results

minimizer_dfo

DFO

Bases: MinimizerBase

This is a wrapper to Derivative Free Optimisation for Least Square: https://numericalalgorithmsgroup.github.io/dfols/

Source code in src/easyscience/fitting/minimizers/minimizer_dfo.py
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
class DFO(MinimizerBase):
    """
    This is a wrapper to Derivative Free Optimisation for Least Square: https://numericalalgorithmsgroup.github.io/dfols/
    """

    package = 'dfo'

    def __init__(
        self,
        obj,  #: ObjBase,
        fit_function: Callable,
        minimizer_enum: AvailableMinimizers | None = None,
    ):  # todo after constraint changes, add type hint: obj: ObjBase  # noqa: E501
        """Initialize the fitting engine with a `ObjBase` and an
        arbitrary fitting function.

        :param obj: Object containing elements of the `Parameter` class
        :type obj: ObjBase
        :param fit_function: function that when called returns y values. 'x' must be the first
                            and only positional argument. Additional values can be supplied by
                            keyword/value pairs
        :type fit_function: Callable
        """
        super().__init__(obj=obj, fit_function=fit_function, minimizer_enum=minimizer_enum)
        # Snapshot of parameter start values, refreshed at the start of each fit.
        self._p_0 = {}

    @staticmethod
    def supported_methods() -> List[str]:
        """Return the minimization methods supported by this wrapper."""
        return ['leastsq']

    @staticmethod
    def all_methods() -> List[str]:
        """Return all minimization methods known to this wrapper."""
        return ['leastsq']

    def fit(
        self,
        x: np.ndarray,
        y: np.ndarray,
        weights: np.ndarray,
        model: Callable | None = None,
        parameters: List[Parameter] | None = None,
        method: str | None = None,
        tolerance: float | None = None,
        max_evaluations: int | None = None,
        progress_callback: Callable[[dict], bool | None] | None = None,
        callback: Callable[[DFOCallbackState], None] | None = None,
        **kwargs,
    ) -> FitResults:
        """Perform a fit using the DFO-ls engine.

        :param x: points to be calculated at
        :type x: np.ndarray
        :param y: measured points
        :type y: np.ndarray
        :param weights: Weights for supplied measured points. For standard
            least squares, the weights should be 1/sigma, where sigma is the
            standard deviation of the measurement. For unweighted least
            squares, these should be 1.
        :type weights: np.ndarray
        :param model: Optional Model which is being fitted to
        :type model: Callable
        :param parameters: Optional parameters for the fit
        :type parameters: List[Parameter]
        :param method: Method for minimization
        :type method: str
        :param tolerance: Convergence tolerance forwarded to DFO-LS as ``rhoend``
        :type tolerance: float | None
        :param max_evaluations: Maximum number of objective evaluations,
            forwarded to DFO-LS as ``maxfun``
        :type max_evaluations: int | None
        :param progress_callback: Optional callback for progress updates
        :type progress_callback: Callable
        :param callback: Optional low-level DFO callback receiving a
            `DFOCallbackState` on every objective evaluation
        :type callback: Callable
        :param kwargs: Additional arguments for the fitting function.
        :return: Fit results
        :rtype: FitResults
        """
        x, y, weights = np.asarray(x), np.asarray(y), np.asarray(weights)

        # Input validation: shapes must agree and weights must be usable as 1/sigma.
        if y.shape != x.shape:
            raise ValueError('x and y must have the same shape.')

        if weights.shape != x.shape:
            raise ValueError('Weights must have the same shape as x and y.')

        if not np.isfinite(weights).all():
            raise ValueError('Weights cannot be NaN or infinite.')

        if (weights <= 0).any():
            raise ValueError('Weights must be strictly positive and non-zero.')

        # Bridge progress_callback into the DFO callback mechanism
        if progress_callback is not None and callback is None:
            callback = self._make_progress_adapter(progress_callback)

        if model is None:
            model_function = self._make_model(
                parameters=parameters,
                callback=callback,
            )
            model = model_function(x, y, weights)
        elif callback is not None:
            # A user-supplied model still gets callback reporting by wrapping it.
            model = self._wrap_model_with_callback(
                model,
                self._get_callback_parameter_names(parameters),
                callback,
            )
        self._cached_model = model
        self._cached_model.x = x
        self._cached_model.y = y

        # Snapshot starting values so the final report can include `p0`.
        self._p_0 = {f'p{key}': self._cached_pars[key].value for key in self._cached_pars.keys()}

        # Why do we do this? Because a fitting template has to have global_object instantiated outside pre-runtime
        from easyscience import global_object

        # Disable the undo stack during the fit; `_set_parameter_fit_result`
        # re-enables it (when it was on) to record the final values as one macro.
        stack_status = global_object.stack.enabled
        global_object.stack.enabled = False

        kwargs = self._prepare_kwargs(tolerance, max_evaluations, **kwargs)

        try:
            model_results = self._dfo_fit(self._cached_pars, model, **kwargs)
            self._set_parameter_fit_result(model_results, stack_status)
            results = self._gen_fit_results(model_results, weights)
        except FitError:
            # Already a FitError (e.g. raised by `_dfo_fit`); don't re-wrap it.
            self._restore_parameter_values()
            raise
        except Exception as e:
            self._restore_parameter_values()
            raise FitError(e)
        finally:
            global_object.stack.enabled = stack_status
        return results

    def convert_to_pars_obj(self, par_list: List[Parameter] | None = None):
        """Required by interface but not needed for DFO-LS."""
        pass

    @staticmethod
    def convert_to_par_object(obj) -> None:
        """Required by interface but not needed for DFO-LS."""
        pass

    def _make_model(
        self,
        parameters: List[Parameter] | None = None,
        callback: Callable[[DFOCallbackState], None] | None = None,
    ) -> Callable:
        """Generate a model from the supplied `fit_function` and
        parameters in the base object. Note that this makes a callable
        as it needs to be initialized with *x*, *y*, *weights*

        :return: Callable model which returns residuals
        :rtype: Callable
        """
        fit_func = self._generate_fit_function()

        def _outer(obj: DFO):
            def _make_func(x, y, weights):
                # Seed the parameter dict with current values; `_residuals`
                # overwrites them in place on every solver evaluation.
                dfo_pars = {}
                if not parameters:
                    for name, par in obj._cached_pars.items():
                        dfo_pars[MINIMIZER_PARAMETER_PREFIX + str(name)] = par.value
                else:
                    for par in parameters:
                        dfo_pars[MINIMIZER_PARAMETER_PREFIX + par.unique_name] = par.value

                def _residuals(pars_values: List[float]) -> np.ndarray:
                    # Weighted residuals: DFO-LS minimizes the sum of squares of these.
                    for idx, par_name in enumerate(dfo_pars.keys()):
                        dfo_pars[par_name] = pars_values[idx]
                    return (y - fit_func(x, **dfo_pars)) * weights

                return obj._wrap_model_with_callback(
                    _residuals,
                    list(dfo_pars.keys()),
                    callback,
                )

            return _make_func

        return _outer(self)

    def _get_callback_parameter_names(
        self, parameters: List[Parameter] | None = None
    ) -> list[str]:
        """Return the prefixed parameter names used for callback reporting."""
        if parameters is not None:
            return [MINIMIZER_PARAMETER_PREFIX + parameter.unique_name for parameter in parameters]
        return [MINIMIZER_PARAMETER_PREFIX + name for name in self._cached_pars.keys()]

    @staticmethod
    def _wrap_model_with_callback(
        model: Callable,
        parameter_names: list[str],
        callback: Callable[[DFOCallbackState], None] | None,
    ) -> Callable:
        """Wrap `model` so that `callback` receives a `DFOCallbackState`
        after every evaluation; returns `model` unchanged when no
        callback is given.
        """
        if callback is None:
            return model

        # Closure state tracking the best (lowest-objective) evaluation so far.
        evaluation = 0
        best_objective = np.inf
        best_xk = np.array([], dtype=float)
        best_parameters: dict[str, float] = {}

        def wrapped_model(pars_values: List[float]) -> np.ndarray:
            nonlocal evaluation, best_objective, best_xk, best_parameters

            residuals = np.asarray(model(pars_values), dtype=float)
            xk = np.asarray(pars_values, dtype=float).copy()
            parameters = {name: value for name, value in zip(parameter_names, xk)}
            # Objective is the sum of squared residuals.
            objective = float(np.dot(residuals.ravel(), residuals.ravel()))

            evaluation += 1
            improved = objective < best_objective
            if improved:
                best_objective = objective
                best_xk = xk.copy()
                best_parameters = parameters.copy()

            callback(
                DFOCallbackState(
                    evaluation=evaluation,
                    xk=xk,
                    residuals=residuals.copy(),
                    objective=objective,
                    parameters=parameters,
                    best_xk=best_xk.copy(),
                    best_objective=best_objective,
                    best_parameters=best_parameters.copy(),
                    improved=improved,
                )
            )

            return residuals

        return wrapped_model

    @staticmethod
    def _make_progress_adapter(
        progress_callback: Callable[[dict], bool | None],
    ) -> Callable[['DFOCallbackState'], None]:
        """Create a DFO callback that translates DFOCallbackState into
        the standard progress_callback dict format used by the GUI.

        :param progress_callback: Standard progress callback (dict ->
            bool|None)
        :return: DFO-compatible callback
        """

        def adapter(state: 'DFOCallbackState') -> None:
            chi2 = state.best_objective
            # Degrees of freedom, floored at 1 to avoid division by zero.
            dof = max(np.asarray(state.residuals).size - len(state.best_parameters), 1)
            reduced_chi2 = chi2 / dof if dof > 0 else chi2
            # Strip the minimizer prefix so keys match user-facing parameter names.
            param_snapshot = {
                name[len(MINIMIZER_PARAMETER_PREFIX) :]: float(val)
                for name, val in state.best_parameters.items()
            }
            payload = {
                'iteration': state.evaluation,
                'chi2': chi2,
                'reduced_chi2': reduced_chi2,
                'parameter_values': param_snapshot,
                'refresh_plots': False,
                'finished': False,
            }
            progress_callback(payload)

        return adapter

    def _set_parameter_fit_result(self, fit_result, stack_status, ci: float = 0.95) -> None:
        """Update parameters to their final values and assign a std
        error to them.

        :param fit_result: Fit object which contains info on the fit
        :param stack_status: Whether the undo stack was enabled before the fit
        :param ci: Confidence interval for calculating errors. Default
            95%
        :return: None
        :rtype: noneType
        """
        from easyscience import global_object

        pars = self._cached_pars
        if stack_status:
            # Re-enable the stack and record all value updates as one macro.
            self._restore_parameter_values()
            global_object.stack.enabled = True
            global_object.stack.beginMacro('Fitting routine')

        error_matrix = self._error_from_jacobian(fit_result.jacobian, fit_result.resid, ci)
        for idx, par in enumerate(pars.values()):
            par.value = fit_result.x[idx]
            # Diagonal of the error matrix carries the per-parameter error.
            par.error = error_matrix[idx, idx]

        if stack_status:
            global_object.stack.endMacro()

    def _gen_fit_results(self, fit_results, weights, **kwargs) -> FitResults:
        """Convert fit results into the unified `FitResults` format.

        :param fit_results: DFO-LS result object
        :param weights: weights used in the fit (1/sigma)
        :return: fit results container
        :rtype: FitResults
        """

        results = FitResults()
        for name, value in kwargs.items():
            # NOTE(review): getattr(..., False) also skips attributes whose
            # current value is falsy — confirm this filter is intended.
            if getattr(results, name, False):
                setattr(results, name, value)
        # DFO-LS stores fixed exit-code constants on each result object;
        # EXIT_SUCCESS is 0 and EXIT_MAXFUN_WARNING keeps a different flag value.
        results.success = fit_results.flag == fit_results.EXIT_SUCCESS
        if fit_results.flag == fit_results.EXIT_MAXFUN_WARNING:
            warnings.warn(str(fit_results.msg), UserWarning)

        pars = {}
        for p_name, par in self._cached_pars.items():
            pars[f'p{p_name}'] = par.value
        results.p = pars

        results.p0 = self._p_0
        results.x = self._cached_model.x
        results.y_obs = self._cached_model.y
        results.y_calc = self.evaluate(results.x, minimizer_parameters=results.p)
        # `weights` here are 1/sigma (residuals are multiplied by them in `_make_model`).
        # `FitResults.chi2` divides residuals by `y_err`, so `y_err` must be sigma, not the weight.
        results.y_err = 1 / np.asarray(weights)
        results.n_evaluations = int(fit_results.nf)
        results.iterations = self._extract_iterations(fit_results)
        results.message = str(fit_results.msg)
        if not results.success:
            warning_message = results.message or 'DFO fit did not succeed.'
            warnings.warn(warning_message, UserWarning, stacklevel=2)
        # results.residual = results.y_obs - results.y_calc
        # results.goodness_of_fit = fit_results.f

        results.minimizer_engine = self.__class__
        results.fit_args = None
        results.engine_result = fit_results
        # results.check_sanity()

        return results

    @staticmethod
    def _extract_iterations(fit_results) -> int | None:
        """Pull the total iteration count from DFO-LS diagnostic info,
        which may be a dict or a DataFrame-like object; returns None
        when unavailable.
        """
        diagnostic_info = getattr(fit_results, 'diagnostic_info', None)
        if diagnostic_info is None:
            return None

        if isinstance(diagnostic_info, dict):
            values = diagnostic_info.get('iters_total')
            if values is None or len(values) == 0:
                return None
            return int(values[-1])

        # Otherwise treat it as DataFrame-like (has `columns`, `dropna`, `iloc`).
        columns = getattr(diagnostic_info, 'columns', ())
        if 'iters_total' not in columns:
            return None

        series = diagnostic_info['iters_total'].dropna()
        if series.empty:
            return None
        return int(series.iloc[-1])

    @staticmethod
    def _dfo_fit(
        pars: Dict[str, Parameter],
        model: Callable,
        **kwargs,
    ):
        """Method to convert EasyScience styling to DFO-LS styling (yes,
        again)

        :param pars: Cached EasyScience parameters supplying start values and bounds
        :param model: Model which accepts f(x[0])
        :type model: Callable
        :param kwargs: Any additional arguments for dfols.solver
        :type kwargs: dict
        :return: dfols fit results container
        """

        pars_values = np.array([par.value for par in pars.values()])

        bounds = (
            np.array([par.min for par in pars.values()]),
            np.array([par.max for par in pars.values()]),
        )
        # https://numericalalgorithmsgroup.github.io/dfols/build/html/userguide.html
        if not np.isinf(bounds).any():
            # It is only possible to scale (normalize) variables if they are bound (different from inf)
            kwargs['scaling_within_bounds'] = True

        results = dfols.solve(model, pars_values, bounds=bounds, **kwargs)

        # DFO-LS uses EXIT_MAXFUN_WARNING when it stops on the evaluation budget;
        # we still return the partial fit result and let the unified result mark it as non-success.
        if results.flag in {results.EXIT_SUCCESS, results.EXIT_MAXFUN_WARNING}:
            return results

        raise FitError(f'Fit failed with message: {results.msg}')

    @staticmethod
    def _prepare_kwargs(
        tolerance: float | None = None,
        max_evaluations: int | None = None,
        **kwargs,
    ) -> dict:
        """Translate generic fit options into DFO-LS solver keyword
        arguments.

        :param tolerance: trust-region end size (``rhoend``); must be <= 0.1
        :param max_evaluations: maximum number of objective evaluations (``maxfun``)
        :return: keyword arguments ready for ``dfols.solve``
        """
        if max_evaluations is not None:
            kwargs['maxfun'] = max_evaluations  # max number of function evaluations
        if tolerance is not None:
            if 0.1 < tolerance:  # dfo module raises an error for larger values
                raise ValueError('Tolerance must be equal or smaller than 0.1')
            kwargs['rhoend'] = tolerance  # size of the trust region
        # Always request diagnostic info so `_extract_iterations` can report
        # the iteration count in the unified results.
        user_params = dict(kwargs.get('user_params') or {})
        user_params['logging.save_diagnostic_info'] = True
        kwargs['user_params'] = user_params
        return kwargs
__init__(obj, fit_function, minimizer_enum=None)

Initialize the fitting engine with a ObjBase and an arbitrary fitting function.

:param obj: Object containing elements of the Parameter class :type obj: ObjBase :param fit_function: function that when called returns y values. 'x' must be the first and only positional argument. Additional values can be supplied by keyword/value pairs :type fit_function: Callable

Source code in src/easyscience/fitting/minimizers/minimizer_dfo.py
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
def __init__(
    self,
    obj,  #: ObjBase,
    fit_function: Callable,
    minimizer_enum: AvailableMinimizers | None = None,
):  # todo after constraint changes, add type hint: obj: ObjBase  # noqa: E501
    """Initialize the fitting engine with a `ObjBase` and an
    arbitrary fitting function.

    :param obj: Object containing elements of the `Parameter` class
    :type obj: ObjBase
    :param fit_function: function that when called returns y values. 'x' must be the first
                        and only positional argument. Additional values can be supplied by
                        keyword/value pairs
    :type fit_function: Callable
    :param minimizer_enum: Optional enum member identifying the minimizer; forwarded to the base class
    :type minimizer_enum: AvailableMinimizers
    """
    super().__init__(obj=obj, fit_function=fit_function, minimizer_enum=minimizer_enum)
    # Starting parameter values; repopulated at the start of each `fit` call.
    self._p_0: dict = {}
convert_to_par_object(obj) staticmethod

Required by interface but not needed for DFO-LS.

Source code in src/easyscience/fitting/minimizers/minimizer_dfo.py
170
171
172
173
@staticmethod
def convert_to_par_object(obj) -> None:
    """Required by interface but not needed for DFO-LS."""
    pass
convert_to_pars_obj(par_list=None)

Required by interface but not needed for DFO-LS.

Source code in src/easyscience/fitting/minimizers/minimizer_dfo.py
166
167
168
def convert_to_pars_obj(self, par_list: List[Parameter] | None = None):
    """No-op: DFO-LS needs no parameter container; kept for interface parity."""
    return None
fit(x, y, weights, model=None, parameters=None, method=None, tolerance=None, max_evaluations=None, progress_callback=None, callback=None, **kwargs)

Perform a fit using the DFO-ls engine.

:param x: points to be calculated at :type x: np.ndarray :param y: measured points :type y: np.ndarray :param weights: Weights for supplied measured points. :type weights: np.ndarray :param model: Optional Model which is being fitted to :type model: lmModel :param parameters: Optional parameters for the fit :type parameters: List[bumpsParameter] :param kwargs: Additional arguments for the fitting function. :param method: Method for minimization :type method: str :return: Fit results :rtype: ModelResult For standard least squares, the weights should be 1/sigma, where sigma is the standard deviation of the measurement. For unweighted least squares, these should be 1.

Source code in src/easyscience/fitting/minimizers/minimizer_dfo.py
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
def fit(
    self,
    x: np.ndarray,
    y: np.ndarray,
    weights: np.ndarray,
    model: Callable | None = None,
    parameters: List[Parameter] | None = None,
    method: str | None = None,
    tolerance: float | None = None,
    max_evaluations: int | None = None,
    progress_callback: Callable[[dict], bool | None] | None = None,
    callback: Callable[[DFOCallbackState], None] | None = None,
    **kwargs,
) -> FitResults:
    """Perform a fit using the DFO-ls engine.

    :param x: points to be calculated at
    :type x: np.ndarray
    :param y: measured points
    :type y: np.ndarray
    :param weights: Weights for supplied measured points.
    :type weights: np.ndarray
    :param model: Optional Model which is being fitted to
    :type model: lmModel
    :param parameters: Optional parameters for the fit
    :type parameters: List[bumpsParameter]
    :param kwargs: Additional arguments for the fitting function.
    :param method: Method for minimization
    :type method: str
    :param tolerance: Trust-region end size forwarded to DFO-LS as ``rhoend``;
        values above 0.1 are rejected
    :type tolerance: float
    :param max_evaluations: Evaluation budget forwarded to DFO-LS as ``maxfun``
    :type max_evaluations: int
    :param progress_callback: Optional progress hook; bridged into ``callback``
        when no explicit ``callback`` is supplied
    :type progress_callback: Callable
    :param callback: Optional hook invoked with a ``DFOCallbackState`` snapshot
        per objective evaluation
    :type callback: Callable
    :return: Fit results
    :rtype: ModelResult For standard least squares, the weights
        should be 1/sigma, where sigma is the standard deviation of
        the measurement. For unweighted least squares, these should
        be 1.
    """
    x, y, weights = np.asarray(x), np.asarray(y), np.asarray(weights)

    if y.shape != x.shape:
        raise ValueError('x and y must have the same shape.')

    if weights.shape != x.shape:
        raise ValueError('Weights must have the same shape as x and y.')

    if not np.isfinite(weights).all():
        raise ValueError('Weights cannot be NaN or infinite.')

    if (weights <= 0).any():
        raise ValueError('Weights must be strictly positive and non-zero.')

    # Bridge progress_callback into the DFO callback mechanism
    if progress_callback is not None and callback is None:
        callback = self._make_progress_adapter(progress_callback)

    if model is None:
        model_function = self._make_model(
            parameters=parameters,
            callback=callback,
        )
        model = model_function(x, y, weights)
    elif callback is not None:
        # A ready-made model still gets the callback by wrapping it.
        model = self._wrap_model_with_callback(
            model,
            self._get_callback_parameter_names(parameters),
            callback,
        )
    self._cached_model = model
    self._cached_model.x = x
    self._cached_model.y = y

    # Snapshot the starting parameter values before optimisation begins.
    self._p_0 = {f'p{key}': self._cached_pars[key].value for key in self._cached_pars.keys()}

    # Why do we do this? Because a fitting template has to have global_object instantiated outside pre-runtime
    from easyscience import global_object

    stack_status = global_object.stack.enabled
    global_object.stack.enabled = False

    kwargs = self._prepare_kwargs(tolerance, max_evaluations, **kwargs)

    try:
        model_results = self._dfo_fit(self._cached_pars, model, **kwargs)
        self._set_parameter_fit_result(model_results, stack_status)
        results = self._gen_fit_results(model_results, weights)
    except FitError:
        # Known fit failure: restore parameters and propagate unchanged.
        self._restore_parameter_values()
        raise
    except Exception as e:
        # Unexpected failure: restore parameters and wrap in FitError.
        self._restore_parameter_values()
        raise FitError(e)
    finally:
        global_object.stack.enabled = stack_status
    return results

DFOCallbackState dataclass

Snapshot of a DFO objective evaluation.

Source code in src/easyscience/fitting/minimizers/minimizer_dfo.py
24
25
26
27
28
29
30
31
32
33
34
35
36
@dataclass(frozen=True)
class DFOCallbackState:
    """Snapshot of a DFO objective evaluation."""

    evaluation: int  # running count of objective evaluations
    xk: np.ndarray  # parameter vector used for this evaluation
    residuals: np.ndarray  # residual vector produced at ``xk``
    objective: float  # scalar objective value for this evaluation
    parameters: dict[str, float]  # parameter name -> value for this evaluation
    best_xk: np.ndarray  # best parameter vector observed so far
    best_objective: float  # best objective value observed so far
    best_parameters: dict[str, float]  # parameter mapping at the best point so far
    improved: bool  # True when this evaluation improved on the previous best

minimizer_lmfit

LMFit

Bases: MinimizerBase

This is a wrapper to the extended Levenberg-Marquardt Fit: https://lmfit.github.io/lmfit-py/ It allows for the lmfit fitting engine to use parameters declared in an EasyScience.base_classes.ObjBase.

Source code in src/easyscience/fitting/minimizers/minimizer_lmfit.py
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
class LMFit(MinimizerBase):  # noqa: S101
    """
    This is a wrapper to the extended Levenberg-Marquardt Fit: https://lmfit.github.io/lmfit-py/
    It allows for the lmfit fitting engine to use parameters declared in an `EasyScience.base_classes.ObjBase`.
    """

    package = 'lmfit'

    def __init__(
        self,
        obj,  #: ObjBase,
        fit_function: Callable,
        minimizer_enum: AvailableMinimizers | None = None,
    ):  # todo after constraint changes, add type hint: obj: ObjBase  # noqa: E501
        """Initialize the minimizer with the `ObjBase` and the
        `fit_function` to be used.

        :param obj: Base object which contains the parameters to be
            fitted
        :type obj: ObjBase
        :param fit_function: Function which will be fitted to the data
        :type fit_function: Callable
        :param minimizer_enum: Optional enum member selecting the minimizer flavour
        :type minimizer_enum: AvailableMinimizers
        """
        super().__init__(obj=obj, fit_function=fit_function, minimizer_enum=minimizer_enum)
        # Last iteration index reported by lmfit's iteration callback (updated in `fit`).
        self._last_iteration: int | None = None

    @staticmethod
    def all_methods() -> List[str]:
        """Return every lmfit method name this wrapper recognises."""
        return [
            'least_squares',
            'leastsq',
            'differential_evolution',
            'basinhopping',
            'ampgo',
            'nelder',
            'lbfgsb',
            'powell',
            'cg',
            'newton',
            'cobyla',
            'bfgs',
        ]

    @staticmethod
    def supported_methods() -> List[str]:
        """Return the subset of lmfit methods officially supported here."""
        return [
            'least_squares',
            'leastsq',
            'differential_evolution',
            'powell',
            'cobyla',
        ]

    def fit(
        self,
        x: np.ndarray,
        y: np.ndarray,
        weights: np.ndarray = None,
        model: LMModel | None = None,
        parameters: LMParameters | None = None,
        method: str | None = None,
        tolerance: float | None = None,
        max_evaluations: int | None = None,
        progress_callback: Callable[[dict], bool | None] | None = None,
        minimizer_kwargs: dict | None = None,
        engine_kwargs: dict | None = None,
        **kwargs,
    ) -> FitResults:
        """Perform a fit using the lmfit engine.

        :param method: Method for minimization
        :type method: str
        :param x: points to be calculated at
        :type x: np.ndarray
        :param y: measured points
        :type y: np.ndarray
        :param weights: Weights for supplied measured points
        :type weights: np.ndarray
        :param model: Optional Model which is being fitted to
        :type model: LMModel
        :param parameters: Optional parameters for the fit
        :type parameters: LMParameters
        :param tolerance: Convergence tolerance; mapped per method onto lmfit's
            ``ftol``/``tol`` fit keyword
        :type tolerance: float
        :param max_evaluations: Maximum number of function evaluations
            (lmfit ``max_nfev``)
        :type max_evaluations: int
        :param progress_callback: Optional hook invoked once per iteration with
            a progress payload dictionary
        :type progress_callback: Callable
        :param minimizer_kwargs: Arguments to be passed directly to the
            minimizer
        :type minimizer_kwargs: dict
        :param engine_kwargs: Additional keyword arguments forwarded verbatim
            to ``model.fit``
        :type engine_kwargs: dict
        :param kwargs: Additional arguments for the fitting function.
        :return: Fit results
        :rtype: ModelResult For standard least squares, the weights
            should be 1/sigma, where sigma is the standard deviation of
            the measurement. For unweighted least squares, these should
            be 1.
        """
        x, y, weights = np.asarray(x), np.asarray(y), np.asarray(weights)

        if y.shape != x.shape:
            raise ValueError('x and y must have the same shape.')

        if weights.shape != x.shape:
            raise ValueError('Weights must have the same shape as x and y.')

        if not np.isfinite(weights).all():
            raise ValueError('Weights cannot be NaN or infinite.')

        if (weights <= 0).any():
            raise ValueError('Weights must be strictly positive and non-zero.')

        if engine_kwargs is None:
            engine_kwargs = {}

        method_kwargs = self._get_method_kwargs(method)
        fit_kws_dict = self._get_fit_kws(method, tolerance, minimizer_kwargs)

        # Why do we do this? Because a fitting template has to have global_object instantiated outside pre-runtime
        from easyscience import global_object

        stack_status = global_object.stack.enabled
        global_object.stack.enabled = False

        try:
            if model is None:
                model = self._make_model()

            # Track iteration count via lmfit's per-iteration callback.
            self._last_iteration = None
            iter_cb = self._create_iter_callback(progress_callback)
            model_results = model.fit(
                y,
                x=x,
                weights=weights,
                max_nfev=max_evaluations,
                iter_cb=iter_cb,
                fit_kws=fit_kws_dict,
                **method_kwargs,
                **engine_kwargs,
                **kwargs,
            )
            self._set_parameter_fit_result(model_results, stack_status)
            results = self._gen_fit_results(model_results, iterations=self._last_iteration)
        except Exception as e:
            self._restore_parameter_values()
            raise FitError(e)
        finally:
            global_object.stack.enabled = stack_status
        return results

    def _create_iter_callback(
        self,
        progress_callback: Callable[[dict], bool | None] | None,
    ) -> Callable | None:
        """Build an lmfit ``iter_cb`` that records the last iteration index
        and forwards a progress payload to ``progress_callback`` (if given).
        """

        def iter_cb(params, iteration: int, residuals: np.ndarray, *args, **kwargs) -> bool:
            # Negative iteration numbers are lmfit-internal bookkeeping; skip them.
            if iteration >= 0:
                self._last_iteration = int(iteration)
            if progress_callback is None:
                return False
            payload = self._build_progress_payload(params, iteration, residuals)
            progress_callback(payload)
            # Returning False tells lmfit not to abort the fit.
            return False

        return iter_cb

    def _build_progress_payload(self, params, iteration: int, residuals: np.ndarray) -> dict:
        """Assemble the per-iteration progress dictionary handed to the
        caller's progress callback.
        """
        residual_array = np.asarray(residuals)
        chi2 = float(np.square(residual_array).sum())
        varied_parameter_count = sum(
            1 for parameter in params.values() if getattr(parameter, 'vary', False)
        )
        degrees_of_freedom = residual_array.size - varied_parameter_count
        reduced_chi2 = chi2 / degrees_of_freedom if degrees_of_freedom > 0 else chi2

        # Strip the engine prefix so callers see the EasyScience parameter names.
        parameter_values = {
            parameter_name[len(MINIMIZER_PARAMETER_PREFIX) :]: float(parameter.value)
            for parameter_name, parameter in params.items()
            if parameter_name.startswith(MINIMIZER_PARAMETER_PREFIX)
        }
        # Include cached parameters lmfit does not carry (e.g. fixed ones).
        for parameter_name, parameter in self._cached_pars.items():
            lmfit_parameter_name = f'{MINIMIZER_PARAMETER_PREFIX}{parameter_name}'
            if lmfit_parameter_name not in params:
                parameter_values[parameter_name] = float(parameter.value)

        return {
            'iteration': int(iteration),
            'chi2': chi2,
            'reduced_chi2': reduced_chi2,
            'parameter_values': parameter_values,
            'refresh_plots': False,
            'finished': False,
        }

    def _get_fit_kws(
        self, method: str | None, tolerance: float | None, minimizer_kwargs: dict | None
    ) -> dict:
        """Map the generic ``tolerance`` onto the method-specific lmfit
        fit keyword (``ftol`` for least-squares methods, ``tol`` otherwise).
        """
        if minimizer_kwargs is None:
            minimizer_kwargs = {}
        if tolerance is not None:
            if method in [None, 'least_squares', 'leastsq']:
                minimizer_kwargs['ftol'] = tolerance
            if method in ['differential_evolution', 'powell', 'cobyla']:
                minimizer_kwargs['tol'] = tolerance
        return minimizer_kwargs

    def convert_to_pars_obj(self, parameters: List[Parameter] | None = None) -> LMParameters:
        """Create an lmfit compatible container with the `Parameters`
        converted from the base object.

        :param parameters: If only a single/selection of parameter is
            required. Specify as a list
        :return: lmfit Parameters compatible object
        """
        if parameters is None:
            # Assume that we have a ObjBase for which we can obtain a list
            parameters = self._object.get_fit_parameters()
        lm_parameters = LMParameters().add_many([
            self.convert_to_par_object(parameter) for parameter in parameters
        ])
        return lm_parameters

    @staticmethod
    def convert_to_par_object(parameter: Parameter) -> LMParameter:
        """Convert an EasyScience Parameter object to a lmfit Parameter
        object.

        :return: lmfit Parameter compatible object.
        :rtype: LMParameter
        """
        value = parameter.value

        return LMParameter(
            MINIMIZER_PARAMETER_PREFIX + parameter.unique_name,
            value=value,
            vary=not parameter.fixed,
            min=parameter.min,
            max=parameter.max,
            expr=None,
            brute_step=None,
        )

    def _make_model(self, pars: LMParameters | None = None) -> LMModel:
        """Generate a lmfit model from the supplied `fit_function` and
        parameters in the base object.

        :return: Callable lmfit model
        :rtype: LMModel
        """
        # Generate the fitting function
        fit_func = self._generate_fit_function()

        self._fit_function = fit_func

        if pars is None:
            pars = self._cached_pars
        # Create the model
        model = LMModel(
            fit_func,
            independent_vars=['x'],
            param_names=[MINIMIZER_PARAMETER_PREFIX + str(key) for key in pars.keys()],
        )
        # Assign values from the `Parameter` to the model.
        # Both LMParameter and EasyScience Parameter expose `.value`, `.min`
        # and `.max`, so no type dispatch is needed here.
        for name, item in pars.items():
            model.set_param_hint(
                MINIMIZER_PARAMETER_PREFIX + str(name),
                value=item.value,
                min=item.min,
                max=item.max,
            )

        # Cache the model for later reference
        self._cached_model = model
        return model

    def _set_parameter_fit_result(self, fit_result: ModelResult, stack_status: bool):
        """Update parameters to their final values and assign a std
        error to them.

        :param fit_result: Fit object which contains info on the fit
        :param stack_status: Whether the undo/redo stack was enabled before the fit
        :return: None
        :rtype: noneType
        """
        from easyscience import global_object

        pars = self._cached_pars
        if stack_status:
            # Re-enable the stack so the whole update is one undoable macro.
            self._restore_parameter_values()
            global_object.stack.enabled = True
            global_object.stack.beginMacro('Fitting routine')
        for name in pars.keys():
            pars[name].value = fit_result.params[MINIMIZER_PARAMETER_PREFIX + str(name)].value
            if fit_result.errorbars:
                pars[name].error = fit_result.params[MINIMIZER_PARAMETER_PREFIX + str(name)].stderr
            else:
                pars[name].error = 0.0
        if stack_status:
            global_object.stack.endMacro()

    def _gen_fit_results(self, fit_results: ModelResult, **kwargs) -> FitResults:
        """
        Convert fit results into the unified `FitResults` format.
        See https://github.com/lmfit/lmfit-py/blob/480072b9f7834b31ff2ca66277a5ad31246843a4/lmfit/model.py#L1272

        :param fit_results: Fit object which contains info on the fit
        :return: fit results container
        :rtype: FitResults
        """
        results = FitResults()
        for name, value in kwargs.items():
            # `hasattr` (not a truthiness check on the current value) so that
            # attributes whose default is falsy (None, 0, {}) can still be set.
            if hasattr(results, name):
                setattr(results, name, value)

        # We need to unify return codes......
        results.success = fit_results.success
        results.y_obs = fit_results.data
        results.x = fit_results.userkws['x']
        results.p = fit_results.values
        results.p0 = fit_results.init_values
        results.y_calc = fit_results.best_fit
        results.y_err = 1 / fit_results.weights
        results.n_evaluations = fit_results.nfev
        results.iterations = kwargs.get('iterations')
        results.message = fit_results.message
        if fit_results.success is False and fit_results.message:
            warnings.warn(str(fit_results.message), UserWarning)
        results.minimizer_engine = self.__class__
        results.fit_args = None

        results.engine_result = fit_results
        return results
__init__(obj, fit_function, minimizer_enum=None)

Initialize the minimizer with the ObjBase and the fit_function to be used.

:param obj: Base object which contains the parameters to be fitted :type obj: ObjBase :param fit_function: Function which will be fitted to the data :type fit_function: Callable :param method: Method to be used by the minimizer :type method: str

Source code in src/easyscience/fitting/minimizers/minimizer_lmfit.py
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
def __init__(
    self,
    obj,  #: ObjBase,
    fit_function: Callable,
    minimizer_enum: AvailableMinimizers | None = None,
):  # todo after constraint changes, add type hint: obj: ObjBase  # noqa: E501
    """Initialize the minimizer with the `ObjBase` and the
    `fit_function` to be used.

    :param obj: Base object which contains the parameters to be
        fitted
    :type obj: ObjBase
    :param fit_function: Function which will be fitted to the data
    :type fit_function: Callable
    :param minimizer_enum: Optional enum member selecting the minimizer flavour
    :type minimizer_enum: AvailableMinimizers
    """
    super().__init__(obj=obj, fit_function=fit_function, minimizer_enum=minimizer_enum)
    # Last iteration index reported by lmfit's iteration callback; reset and updated in `fit`.
    self._last_iteration: int | None = None
convert_to_par_object(parameter) staticmethod

Convert an EasyScience Parameter object to a lmfit Parameter object.

:return: lmfit Parameter compatible object. :rtype: LMParameter

Source code in src/easyscience/fitting/minimizers/minimizer_lmfit.py
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
@staticmethod
def convert_to_par_object(parameter: Parameter) -> LMParameter:
    """Build an lmfit ``Parameter`` mirroring an EasyScience ``Parameter``.

    :return: lmfit Parameter compatible object.
    :rtype: LMParameter
    """
    # The engine prefix keeps lmfit names distinct from EasyScience names.
    return LMParameter(
        MINIMIZER_PARAMETER_PREFIX + parameter.unique_name,
        value=parameter.value,
        vary=not parameter.fixed,
        min=parameter.min,
        max=parameter.max,
        expr=None,
        brute_step=None,
    )
convert_to_pars_obj(parameters=None)

Create an lmfit compatible container with the Parameters converted from the base object.

:param parameters: If only a single/selection of parameter is required. Specify as a list :return: lmfit Parameters compatible object

Source code in src/easyscience/fitting/minimizers/minimizer_lmfit.py
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
def convert_to_pars_obj(self, parameters: List[Parameter] | None = None) -> LMParameters:
    """Create an lmfit compatible container with the `Parameters`
    converted from the base object.

    :param parameters: If only a single/selection of parameter is
        required. Specify as a list
    :return: lmfit Parameters compatible object
    """
    # Fall back to the object's own fit parameters when none were supplied.
    source = self._object.get_fit_parameters() if parameters is None else parameters
    converted = [self.convert_to_par_object(parameter) for parameter in source]
    return LMParameters().add_many(converted)
fit(x, y, weights=None, model=None, parameters=None, method=None, tolerance=None, max_evaluations=None, progress_callback=None, minimizer_kwargs=None, engine_kwargs=None, **kwargs)

Perform a fit using the lmfit engine.

:param method: :type method: :param x: points to be calculated at :type x: np.ndarray :param y: measured points :type y: np.ndarray :param weights: Weights for supplied measured points :type weights: np.ndarray :param model: Optional Model which is being fitted to :type model: LMModel :param parameters: Optional parameters for the fit :type parameters: LMParameters :param minimizer_kwargs: Arguments to be passed directly to the minimizer :type minimizer_kwargs: dict :param kwargs: Additional arguments for the fitting function. :return: Fit results :rtype: ModelResult For standard least squares, the weights should be 1/sigma, where sigma is the standard deviation of the measurement. For unweighted least squares, these should be 1.

Source code in src/easyscience/fitting/minimizers/minimizer_lmfit.py
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
def fit(
    self,
    x: np.ndarray,
    y: np.ndarray,
    weights: np.ndarray = None,
    model: LMModel | None = None,
    parameters: LMParameters | None = None,
    method: str | None = None,
    tolerance: float | None = None,
    max_evaluations: int | None = None,
    progress_callback: Callable[[dict], bool | None] | None = None,
    minimizer_kwargs: dict | None = None,
    engine_kwargs: dict | None = None,
    **kwargs,
) -> FitResults:
    """Perform a fit using the lmfit engine.

    :param method: Method for minimization
    :type method: str
    :param x: points to be calculated at
    :type x: np.ndarray
    :param y: measured points
    :type y: np.ndarray
    :param weights: Weights for supplied measured points
    :type weights: np.ndarray
    :param model: Optional Model which is being fitted to
    :type model: LMModel
    :param parameters: Optional parameters for the fit
    :type parameters: LMParameters
    :param tolerance: Convergence tolerance; mapped per method onto lmfit's
        ``ftol``/``tol`` fit keyword
    :type tolerance: float
    :param max_evaluations: Maximum number of function evaluations
        (lmfit ``max_nfev``)
    :type max_evaluations: int
    :param progress_callback: Optional hook invoked once per iteration with a
        progress payload dictionary
    :type progress_callback: Callable
    :param minimizer_kwargs: Arguments to be passed directly to the
        minimizer
    :type minimizer_kwargs: dict
    :param engine_kwargs: Additional keyword arguments forwarded verbatim to
        ``model.fit``
    :type engine_kwargs: dict
    :param kwargs: Additional arguments for the fitting function.
    :return: Fit results
    :rtype: ModelResult For standard least squares, the weights
        should be 1/sigma, where sigma is the standard deviation of
        the measurement. For unweighted least squares, these should
        be 1.
    """
    x, y, weights = np.asarray(x), np.asarray(y), np.asarray(weights)

    if y.shape != x.shape:
        raise ValueError('x and y must have the same shape.')

    if weights.shape != x.shape:
        raise ValueError('Weights must have the same shape as x and y.')

    if not np.isfinite(weights).all():
        raise ValueError('Weights cannot be NaN or infinite.')

    if (weights <= 0).any():
        raise ValueError('Weights must be strictly positive and non-zero.')

    if engine_kwargs is None:
        engine_kwargs = {}

    method_kwargs = self._get_method_kwargs(method)
    fit_kws_dict = self._get_fit_kws(method, tolerance, minimizer_kwargs)

    # Why do we do this? Because a fitting template has to have global_object instantiated outside pre-runtime
    from easyscience import global_object

    stack_status = global_object.stack.enabled
    global_object.stack.enabled = False

    try:
        if model is None:
            model = self._make_model()

        # Track iteration count via lmfit's per-iteration callback.
        self._last_iteration = None
        iter_cb = self._create_iter_callback(progress_callback)
        model_results = model.fit(
            y,
            x=x,
            weights=weights,
            max_nfev=max_evaluations,
            iter_cb=iter_cb,
            fit_kws=fit_kws_dict,
            **method_kwargs,
            **engine_kwargs,
            **kwargs,
        )
        self._set_parameter_fit_result(model_results, stack_status)
        results = self._gen_fit_results(model_results, iterations=self._last_iteration)
    except Exception as e:
        # Any failure restores starting parameter values before re-raising.
        self._restore_parameter_values()
        raise FitError(e)
    finally:
        global_object.stack.enabled = stack_status
    return results

utils

FitResults

At the moment this is just a dummy way of unifying the returned fit parameters.

Source code in src/easyscience/fitting/minimizers/utils.py
 7
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
class FitResults:
    """At the moment this is just a dummy way of unifying the returned
    fit parameters.
    """

    __slots__ = [
        'success',
        'minimizer_engine',
        'fit_args',
        'p',
        'p0',
        'x',
        'x_matrices',
        'y_obs',
        'y_calc',
        'y_err',
        'n_evaluations',
        'iterations',
        'message',
        'engine_result',
        'total_results',
    ]

    def __init__(self):
        self.success = False
        self.minimizer_engine = None
        self.fit_args = {}
        self.p = {}
        self.p0 = {}
        # Use `np.array([])` (empty 1-D array) rather than `np.ndarray([])`:
        # calling the low-level ndarray constructor directly returns an
        # *uninitialized* 0-d array, which is a documented misuse of the API.
        self.x = np.array([])
        self.x_matrices = np.array([])
        self.y_obs = np.array([])
        self.y_calc = np.array([])
        self.y_err = np.array([])
        self.n_evaluations = None
        self.iterations = None
        self.message = ''
        self.engine_result = None
        self.total_results = None

    def __repr__(self) -> str:
        engine_name = self.minimizer_engine.__name__ if self.minimizer_engine else None
        try:
            chi2_val = self.chi2
            reduced_val = self.reduced_chi2
            if not np.isfinite(chi2_val) or not np.isfinite(reduced_val):
                raise ValueError('Chi2 or reduced chi2 is not finite')
            chi2 = f'{chi2_val:.4g}'
            reduced = f'{reduced_val:.4g}'
        except Exception:
            # Placeholder arrays or mismatched shapes make chi2 meaningless.
            chi2 = 'N/A'
            reduced = 'N/A'

        try:
            n_points = len(self.x)
        except TypeError:
            n_points = 0

        lines = [
            f'FitResults(success={self.success}',
            f'  n_pars={self.n_pars}, n_points={n_points}',
            f'  chi2={chi2}, reduced_chi2={reduced}',
            f'  n_evaluations={self.n_evaluations}',
            f'  iterations={self.iterations}',
            f'  minimizer={engine_name}',
        ]
        if self.message:
            lines.append(f"  message='{self.message}'")
        if self.p:
            par_str = ', '.join(f'{k}={v:.4g}' for k, v in self.p.items())
            lines.append(f'  parameters={{{par_str}}}')
        lines.append(')')
        return '\n'.join(lines)

    @property
    def n_pars(self):
        """Number of fitted parameters."""
        return len(self.p)

    @property
    def residual(self):
        """Difference between observed and calculated y values."""
        return self.y_obs - self.y_calc

    @property
    def chi2(self):
        """Weighted sum of squared residuals."""
        return ((self.residual / self.y_err) ** 2).sum()

    @property
    def reduced_chi2(self):
        """Chi-squared per degree of freedom (n_points - n_pars)."""
        return self.chi2 / (len(self.x) - self.n_pars)

multi_fitter

MultiFitter

Bases: Fitter

Extension of Fitter to enable multiple dataset/fit function fitting.

We can fit these types of data simultaneously: - Multiple models on multiple datasets.

The inherited fit wrapper from Fitter is used unchanged, including support for forwarding progress callbacks to the active minimizer.

Source code in src/easyscience/fitting/multi_fitter.py
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
class MultiFitter(Fitter):
    """Extension of Fitter to enable multiple dataset/fit function
    fitting.

    We can fit these types of data simultaneously:
    - Multiple models on multiple datasets.

    The inherited ``fit`` wrapper from ``Fitter`` is used unchanged,
    including support for forwarding progress callbacks to the active
    minimizer. The multi-dataset behaviour is implemented by overriding
    the reshaping hooks below: the independent variables of all datasets
    are concatenated into a single synthetic 1-D axis for the optimizer,
    and the combined result is split back per dataset afterwards.
    """

    def __init__(
        self,
        fit_objects: Optional[List] = None,
        fit_functions: Optional[List[Callable]] = None,
    ):
        """Bundle several fit objects and their fit functions into one Fitter.

        :param fit_objects: One model object per dataset; wrapped in a
            single ``CollectionBase`` so the base ``Fitter`` sees one object.
        :param fit_functions: One fit function per dataset, matched by
            position with ``fit_objects``.

        NOTE(review): despite the ``Optional`` defaults, passing ``None``
        for either argument raises a ``TypeError`` here (``*fit_objects``
        unpacking / ``fit_functions[0]`` indexing) — confirm callers
        always supply both lists.
        """
        # Create a dummy core object to hold all the fit objects.
        self._fit_objects = CollectionBase('multi', *fit_objects)
        self._fit_functions = fit_functions
        # Initialize with the first of the fit_functions, without this it is
        # not possible to change the fitting engine.
        super().__init__(self._fit_objects, self._fit_functions[0])

    def _fit_function_wrapper(self, real_x=None, flatten: bool = True) -> Callable:
        """Build a single optimizer function that evaluates every dataset's
        fit function against its own real X values and concatenates the
        results along the synthetic fit axis.

        :param real_x: List of independent x arrays, one per dataset; each
            is injected into the corresponding wrapped function.
        :param flatten: Should each per-dataset result be a flat 1D array?
        :return: Wrapped optimizer function operating on the synthetic axis.
        """
        # Build one wrapped callable per (x, fit function) pair.
        wrapped_fns = []
        for this_x, this_fun in zip(real_x, self._fit_functions):
            # NOTE(review): Fitter._fit_function_wrapper presumably reads
            # self._fit_function, hence this stateful assignment per
            # iteration — confirm against the base class.
            self._fit_function = this_fun
            wrapped_fns.append(Fitter._fit_function_wrapper(self, this_x, flatten=flatten))

        def wrapped_fun(x, **kwargs):
            # Generate an empty Y based on x (the synthetic fit axis).
            y = np.zeros_like(x)
            i = 0
            # Iterate through wrapped functions, passing the WRONG x; the
            # correct x was injected in the step above. Each dataset fills
            # its own contiguous slice of y, sized by its dependent dims.
            # NOTE(review): self._dependent_dims appears to be set by the
            # base Fitter before this is called — confirm.
            for idx, dim in enumerate(self._dependent_dims):
                ep = i + np.prod(dim)
                y[i:ep] = wrapped_fns[idx](x, **kwargs)
                i = ep
            return y

        return wrapped_fun

    @staticmethod
    def _precompute_reshaping(
        x: List[np.ndarray],
        y: List[np.ndarray],
        weights: Optional[List[np.ndarray]],
        vectorized: bool,
    ):
        """Convert lists of per-dataset X's and Y's into a single stacked
        shape acceptable for fitting.

        Each dataset is reshaped via the base ``Fitter._precompute_reshaping``,
        then the Y's (and weights, if given) are horizontally stacked and a
        synthetic X axis ``0..N-1`` is generated to index the stacked points.

        :param x: List of independent variables, one array per dataset.
        :param y: List of dependent variables, one array per dataset.
        :param weights: Optional list of per-dataset weights (or None).
        :param vectorized: Is the fn input vectorized or point based?
        :return: Tuple of (synthetic x axis, list of reshaped x's, stacked y,
            stacked weights or None, list of per-dataset dims).
        """
        if weights is None:
            weights = [None] * len(x)
        # Seed the accumulators with the first dataset's reshaped values.
        _, _x_new, _y_new, _weights, _dims = Fitter._precompute_reshaping(
            x[0], y[0], weights[0], vectorized
        )
        x_new = [_x_new]
        y_new = [_y_new]
        w_new = [_weights]
        dims = [_dims]
        for _x, _y, _w in zip(x[1::], y[1::], weights[1::]):
            _, _x_new, _y_new, _weights, _dims = Fitter._precompute_reshaping(
                _x, _y, _w, vectorized
            )
            x_new.append(_x_new)
            y_new.append(_y_new)
            w_new.append(_weights)
            dims.append(_dims)
        y_new = np.hstack(y_new)
        # NOTE(review): only the first dataset's weights decide whether the
        # stacked weights are used at all — confirm mixed None/array weight
        # lists are not expected here.
        if w_new[0] is None:
            w_new = None
        else:
            w_new = np.hstack(w_new)
        # Synthetic fit axis: one index per stacked data point.
        x_fit = np.linspace(0, y_new.size - 1, y_new.size)
        return x_fit, x_new, y_new, w_new, dims

    def _post_compute_reshaping(
        self,
        fit_result_obj: FitResults,
        x: List[np.ndarray],
        y: List[np.ndarray],
    ) -> List[FitResults]:
        """Take a combined fit results object and split it into n chunks
        based on the size of the x, y inputs.

        :param fit_result_obj: Result from a multifit over the stacked axis.
        :param x: List of X co-ords, one per dataset.
        :param y: List of Y co-ords, one per dataset.
        :return: List of fit results, one per dataset, each also carrying
            the unmodified combined result as ``total_results``.
        """

        cls = fit_result_obj.__class__
        sp = 0  # start index of the current dataset's slice in the stacked arrays
        fit_results_list = []
        for idx, this_x in enumerate(x):
            # Create a new Results obj
            current_results = cls()
            # End index: start plus the flattened size of this dataset.
            ep = sp + int(np.array(self._dependent_dims[idx]).prod())

            #  Fill out the new result obj (see EasyScience.fitting.Fitting_template.FitResults)
            current_results.success = fit_result_obj.success
            current_results.minimizer_engine = fit_result_obj.minimizer_engine
            current_results.p = fit_result_obj.p
            current_results.p0 = fit_result_obj.p0
            current_results.n_evaluations = fit_result_obj.n_evaluations
            current_results.iterations = fit_result_obj.iterations
            current_results.message = fit_result_obj.message
            current_results.x = this_x
            current_results.y_obs = y[idx]
            # Carve this dataset's slice out of the stacked results and
            # restore its original shape.
            current_results.y_calc = np.reshape(
                fit_result_obj.y_calc[sp:ep], current_results.y_obs.shape
            )
            current_results.y_err = np.reshape(
                fit_result_obj.y_err[sp:ep], current_results.y_obs.shape
            )
            current_results.engine_result = fit_result_obj.engine_result

            # Attach an additional field for the un-modified results
            current_results.total_results = fit_result_obj
            fit_results_list.append(current_results)
            sp = ep
        return fit_results_list