# mumott/optimization/optimizers/zonal_harmonics_optimizer.py
import sys
import logging
from typing import Any, Dict

import numpy as np
import tqdm
from numpy.typing import NDArray

from mumott.core.hashing import list_to_hash
from mumott.methods.residual_calculators import ZHTTResidualCalculator
from mumott.optimization.loss_functions import SquaredLoss
from mumott.optimization.optimizers.base_optimizer import Optimizer

logger = logging.getLogger(__name__)


class ZHTTOptimizer(Optimizer):
    r"""
    Simple optimizer meant to be used in conjunction with :class:`ZHTTResidualCalculator`.
    The cost function defined in this way is non-convex, so this optimizer depends on being
    given a good starting guess. Such a guess can be generated by a different model and then
    fitted to an axially symmetric model.

    For more details, see the documentation of :class:`ZHTTResidualCalculator`.

    Parameters
    ----------
    loss_function : LossFunction
        The :ref:`loss function <loss_functions>` to be minimized using this algorithm.
    x0
        Initial guess for the solution vector. Must be the same size as
        :attr:`residual_calculator.coefficients`.
    step_size_parameter : float
        Step size for the gradient. If none is given, the largest safe step size
        will be estimated.
    kwargs : Dict[str, Any]
        Miscellaneous options. See notes for valid entries.

    Notes
    -----
    Valid entries in :attr:`kwargs` are

    maxiter : int
        Maximum number of iterations. Default value is ``20``.
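
    Example
    -------
    A minimal, hypothetical usage sketch; it assumes that ``loss_function`` is an
    already-configured :class:`SquaredLoss` wrapping a :class:`ZHTTResidualCalculator`,
    and that ``x0`` is a starting guess obtained from a previously fitted model::

        >>> optimizer = ZHTTOptimizer(loss_function, x0, maxiter=10)
        >>> result = optimizer.optimize()
        >>> print(result['loss'], result['nit'])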
43 """

    def __init__(self,
                 loss_function: SquaredLoss,
                 x0: NDArray[float],
                 step_size_parameter=None,
                 **kwargs: Dict[str, Any]):
        super().__init__(loss_function, **kwargs)

        self._options['x0'] = x0

        if not isinstance(self._loss_function._residual_calculator, ZHTTResidualCalculator):
            raise NotImplementedError('This optimizer requires a ZHTTResidualCalculator'
                                      ' instance for calculating the residual.')

        if step_size_parameter is None:
            logger.info('Since the step size has not been specified, the largest safe '
                        'step size will be estimated. This calculation is approximate '
                        'and does not take regularization into account. There is '
                        'therefore no guarantee of convergence.')
            self._calculate_safe_step_size_parameter()
        else:
            self._step_size_parameter = step_size_parameter

    def _calculate_safe_step_size_parameter(self):
        """ Estimate the largest safe step size for the optimization algorithm. """
        self._step_size_parameter = self._loss_function.get_estimate_of_lifschitz_constant()

    def optimize(self):
        # Default parameters
        opt_kwargs = dict(maxiter=20,
                          x0=None)
        # Update with values from kwargs
        for k in opt_kwargs:
            if k in dict(self):
                opt_kwargs[k] = self[k]  # the base class defines __getitem__ over the kwargs

        # Print a warning for unrecognized kwargs items
        for k in dict(self):
            if k not in opt_kwargs:
                logger.warning(f'Unknown option {k} with value {self[k]} will be ignored.')

        # Prepare optimization
        x = opt_kwargs['x0']
        loss_function_output = self._loss_function.get_loss(x,
                                                            gradient_part='full')

        # Toggle between showing a progress bar or not
        if not self._no_tqdm:
            iterator = tqdm.tqdm(range(opt_kwargs['maxiter']), file=sys.stdout)
            iterator.set_description(f"Loss = {loss_function_output['loss']:.2E}")
        else:
            iterator = range(opt_kwargs['maxiter'])

        for ii in iterator:

            # Get gradient
            loss_function_output = self._loss_function.get_loss(x,
                                                                get_gradient=True,
                                                                gradient_part='full')

            # The angle step size has to scale with the absolute scale of the coefficients
            x = self._loss_function._residual_calculator.coefficients
            step_size_scale = np.array(x[..., 0])
            step_size_scale[step_size_scale < 0] = 0
            step_size_scale = step_size_scale**2 + 1e-15
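
            # The leading entries of x are the zonal harmonic coefficients and take a
            # plain gradient step; the last two entries parametrize the symmetry-axis
            # angles and are stepped with the rescaled, scale-invariant step size.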
            x[..., :-2] = x[..., :-2] - self._step_size_parameter*loss_function_output['gradient'][..., :-2]
            x[..., -2:] = x[..., -2:] - self._step_size_parameter/step_size_scale[..., np.newaxis]\
                * loss_function_output['gradient'][..., -2:]

            if not self._no_tqdm:
                iterator.set_description(f"Loss = {loss_function_output['loss']:.2E}")

        loss_function_output = self._loss_function.get_loss(x,
                                                            gradient_part='full')
        result = dict(x=x, loss=loss_function_output['loss'], nit=ii+1)
        return result

    def __hash__(self) -> int:
        to_hash = [self._options, hash(self._loss_function), hash(self._step_size_parameter)]
        return int(list_to_hash(to_hash), 16)