1 """
2 Core module for parameterization.
3 This module implements all parameterization techniques, split up in modular bits.
4
5 Observable:
6 Observable Pattern for patameterization
7 """
8
import numpy as np
import re
import logging

from ..transformations import __fixed__, FIXED
from .constrainable import Constrainable
from .nameable import adjust_name_for_printing
from ..caching import FunctionCache
47 """
48 This enables optimization handles on an Object as done in GPy 0.4.
49
50 `..._optimizer_copy_transformed`: make sure the transformations and constraints etc are handled
51 """
    def __init__(self, name, default_constraint=None, *a, **kw):
        super(OptimizationHandlable, self).__init__(name, default_constraint=default_constraint, *a, **kw)
        self._optimizer_copy_ = None
        self._optimizer_copy_transformed = False

    @property
    def optimizer_array(self):
        """
        Array for the optimizer to work on.
        This array always lives in the space of the optimizer.
        Thus, it is untransformed, i.e. the constraints' transformations
        have already been inverted.

        Setting this array will make sure the transformed parameters of this model
        are set accordingly. It has to be set with an array retrieved from
        this method, as e.g. fixing will resize the array.

        The optimizer should only work on this array, so that the transformations
        stay intact.
        """
        if self.__dict__.get('_optimizer_copy_', None) is None or self.size != self._optimizer_copy_.size:
            self._optimizer_copy_ = np.empty(self.size)

        if not self._optimizer_copy_transformed:
            self._optimizer_copy_.flat = self.param_array.flat
            # map every non-fixed, constrained parameter into the optimizer space
            # by applying the inverse transformation finv
            [np.put(self._optimizer_copy_, ind, c.finv(self.param_array[ind])) for c, ind in self.constraints.items() if c != __fixed__]
            self._optimizer_copy_transformed = True

        if self._has_fixes():
            self._ensure_fixes()
            return self._optimizer_copy_[self._fixes_]
        return self._optimizer_copy_

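    # Usage sketch (hypothetical model ``m`` with a positively constrained parameter;
    # illustrates the relation between the two spaces, not output of a real model):
    #
    #     x = m.optimizer_array.copy()   # untransformed values, fixed entries excluded
    #     m.optimizer_array = x + 0.1    # setting maps back through the constraints
    #     m.param_array                  # transformed (constrained) values
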
    @optimizer_array.setter

121 """
122 First tell all children to update,
123 then update yourself.
124
125 If trigger_parent is True, we will tell the parent, otherwise not.
126 """
127 [p._trigger_params_changed(trigger_parent=False) for p in self.parameters if not p.is_fixed]
128 self.notify_observers(None, None if trigger_parent else -np.inf)
129
    @property
    def num_params(self):
        """
        Return the number of parameters of this parameter_handle.
        Param objects will always return 0.
        """
        raise NotImplementedError("Abstract, please implement in respective classes")

    def parameter_names(self, add_self=False, adjust_for_printing=False, recursive=True, intermediate=False):
        """
        Get the names of all parameters of this model or parameter. It starts
        from the parameterized object you are calling this method on.

        Note: This does not unravel multidimensional parameters,
              use parameter_names_flat to unravel parameters!

        :param bool add_self: whether to add the own name in front of names
        :param bool adjust_for_printing: whether to call `adjust_name_for_printing` on names
        :param bool recursive: whether to traverse through the hierarchy and append leaf node names
        :param bool intermediate: whether to add intermediate names, i.e. the names of parameterized objects
        """
        if adjust_for_printing: adjust = adjust_name_for_printing
        else: adjust = lambda x: x
        names = []
        if intermediate or (not recursive):
            names.extend([adjust(x.name) for x in self.parameters])
        if intermediate or recursive: names.extend([
            xi for x in self.parameters for xi in
            x.parameter_names(add_self=True,
                              adjust_for_printing=adjust_for_printing,
                              recursive=True,
                              intermediate=False)])
        if add_self: names = [adjust(self.name) + "." + x for x in names]
        return names

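    # Example (hypothetical hierarchy ``m`` -> ``rbf`` -> {``variance``, ``lengthscale``};
    # a sketch of the different flags, not the output of any particular model):
    #
    #     m.parameter_names()                    # ['rbf.variance', 'rbf.lengthscale']
    #     m.parameter_names(add_self=True)       # ['m.rbf.variance', 'm.rbf.lengthscale']
    #     m.parameter_names(recursive=False)     # ['rbf']  (direct children only)
    #     m.parameter_names(intermediate=True)   # ['rbf', 'rbf.variance', 'rbf.lengthscale']
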
196 """
197 Return the flattened parameter names for all subsequent parameters
198 of this parameter. We do not include the name for self here!
199
200 If you want the names for fixed parameters as well in this list,
201 set include_fixed to True.
        :param bool include_fixed: whether to include fixed names here.
        """
        name_list = []
        for p in self.flattened_parameters:
            name = p.hierarchy_name()
            if p.size > 1:
                name_list.extend(["{}[{!s}]".format(name, i) for i in p._indices()])
            else:
                name_list.append(name)
        name_list = np.array(name_list)

        if not include_fixed and self._has_fixes():
            return name_list[self._fixes_]
        return name_list

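    # Example sketch (hypothetical model ``m`` with a multi-dimensional ``weights``
    # Param and a scalar ``variance``; the exact index formatting comes from
    # ``_indices()`` and is only indicative here):
    #
    #     m.parameter_names_flat()                      # one entry per scalar element,
    #                                                   # multi-dimensional Params get indexed names
    #     m.parameter_names_flat(include_fixed=True)    # also lists fixed entries
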
    def randomize(self, rand_gen=None, *args, **kwargs):
        """
        Randomize the model.
        Make this draw from rand_gen if one exists, else draw random normal(0,1).

        :param rand_gen: numpy random number generator which takes args and kwargs
        :param float loc: loc parameter for random number generator
        :param float scale: scale parameter for random number generator
        :param args, kwargs: will be passed through to random number generator
        """
        if rand_gen is None:
            rand_gen = np.random.normal
        # draw only as many values as the optimizer sees (fixed parameters are excluded)
        x = rand_gen(size=self._size_transformed(), *args, **kwargs)
        updates = self.update_model()
        self.update_model(False)  # switch off updates while setting the new values
        self.optimizer_array = x
        # write the new values back, leaving fixed parameters untouched
        x = self.param_array.copy()
        unfixlist = np.ones((self.size,), dtype=bool)
        unfixlist[self.constraints[__fixed__]] = False
        self.param_array.flat[unfixlist] = x.view(np.ndarray).ravel()[unfixlist]
        self.update_model(updates)

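    # Usage sketch (assumes a model ``m``; any generator accepting a ``size`` keyword
    # works, the default draws from N(0, 1)):
    #
    #     m.randomize()                                      # standard normal draws
    #     m.randomize(np.random.uniform, low=0., high=1.)    # custom generator
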
    @property
    def gradient_full(self):
        """
        Note to users:
        This does not return the gradient in the right shape! Use self.gradient
        for the right gradient array.

        To work on the gradient array, use this as the gradient handle.
        This property exists for the in-memory handling of parameters.
        When trying to access the true gradient array, use this.
        """
        self.gradient  # ensure the gradient array is allocated and up to date
        return self._gradient_array_

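    # Sketch: accumulating an objective's gradient in place through the full-size
    # handle (``dL_dtheta`` is a hypothetical array of length ``self.size``):
    #
    #     self.gradient_full[:] = 0.
    #     self.gradient_full += dL_dtheta
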
265 """
266 For propagating the param_array and gradient_array.
267 This ensures the in memory view of each subsequent array.
268
269 1.) connect param_array of children to self.param_array
270 2.) tell all children to propagate further
271 """
272
273
274
275
276
277 pi_old_size = 0
278 for pi in self.parameters:
279 pislice = slice(pi_old_size, pi_old_size + pi.size)
280
281 self.param_array[pislice] = pi.param_array.flat
282 self.gradient_full[pislice] = pi.gradient_full.flat
283
284 pi.param_array.data = parray[pislice].data
285 pi.gradient_full.data = garray[pislice].data
286
287 pi._propagate_param_grad(parray[pislice], garray[pislice])
288 pi_old_size += pi.size
289
290 self._model_initialized_ = True
291
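    # After propagation every child's arrays are views into this object's memory
    # (``parent``/``child`` are hypothetical names; a sketch of the invariant
    # established above, not a test from the library):
    #
    #     parent._propagate_param_grad(parent.param_array, parent.gradient_full)
    #     parent.param_array[:] = 0.   # immediately visible in child.param_array,
    #                                  # since the child's array shares the memory
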


_name_digit = re.compile(r"(?P<name>.*)_(?P<digit>\d+)$")
298 """
299 A parameterisable class.
300
301 This class provides the parameters list (ArrayList) and standard parameter handling,
302 such as {link|unlink}_parameter(), traverse hierarchy and param_array, gradient_array
303 and the empty parameters_changed().
304
305 This class is abstract and should not be instantiated.
306 Use paramz.Parameterized() as node (or leaf) in the parameterized hierarchy.
307 Use paramz.Param() for a leaf in the parameterized hierarchy.
308 """
    @property
    def param_array(self):
        """
        Array representing the parameters of this class.
        There is only one copy of all parameters in memory, two during optimization.

        !WARNING!: setting the parameter array MUST always be done in memory:
        m.param_array[:] = m_copy.param_array
        """
        if (self.__dict__.get('_param_array_', None) is None) or (self._param_array_.size != self.size):
            self._param_array_ = np.empty(self.size, dtype=np.float64)
        return self._param_array_

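    # Sketch of the warning above (hypothetical models ``m`` and ``m_copy``):
    #
    #     m.param_array[:] = m_copy.param_array   # correct: in-memory assignment
    #     m[:] = m_copy.param_array               # equivalent shorthand used in the docs
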
    @property
    def unfixed_param_array(self):
        """
        Array representing the parameters of this class, with fixed parameters
        filtered out. There is only one copy of all parameters in memory,
        two during optimization.

        !WARNING!: setting the parameter array MUST always be done in memory:
        m.param_array[:] = m_copy.param_array
        """
        if self.constraints[__fixed__].size != 0:
            fixes = np.ones(self.size).astype(bool)
            fixes[self.constraints[__fixed__]] = FIXED
            return self._param_array_[fixes]
        else:
            return self._param_array_

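    # Sketch of how fixing interacts with the different views (hypothetical model ``m``
    # with three free parameters, one of which gets fixed):
    #
    #     m.param_array.size           # 3: all parameters
    #     m.some_param.fix()           # hypothetical leaf parameter
    #     m.unfixed_param_array.size   # 2: fixed entries are filtered out
    #     m.optimizer_array.size       # 2: the optimizer never sees fixed entries
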
    def traverse(self, visit, *args, **kwargs):
        """
        Traverse the hierarchy performing `visit(self, *args, **kwargs)`
        at every node reached on the way down. This function includes self!

        See *visitor pattern* in literature. This is implemented in pre-order fashion.

        Example::

            # Collect all children:

            children = []
            self.traverse(children.append)
            print(children)

        """
        if not self.__visited:
            visit(self, *args, **kwargs)
            self.__visited = True
            self._traverse(visit, *args, **kwargs)
            self.__visited = False

    def _traverse(self, visit, *args, **kwargs):
        # continue the traversal at all direct children (they recurse further themselves)
        [p.traverse(visit, *args, **kwargs) for p in self.parameters]

390 """
391 Traverse the hierarchy upwards, visiting all parents and their children except self.
392 See "visitor pattern" in literature. This is implemented in pre-order fashion.
393
394 Example:
395
396 parents = []
397 self.traverse_parents(parents.append)
398 print parents
399 """
400 if self.has_parent():
401 self.__visited = True
402 self._parent_.traverse_parents(visit, *args, **kwargs)
403 self._parent_.traverse(visit, *args, **kwargs)
404 self.__visited = False
405
        self.traverse(visit)

        self.traverse(visit)

    @property
    def gradient(self):
        if (self.__dict__.get('_gradient_array_', None) is None) or self._gradient_array_.size != self.size:
            self._gradient_array_ = np.empty(self.size, dtype=np.float64)
        return self._gradient_array_

    @gradient.setter
    def gradient(self, val):
        self._gradient_array_[:] = val

    @property
    def num_params(self):
        return len(self.parameters)

        for other in self.parameters:
            if (not (other is param)) and (other.name == param.name):
                return warn_and_retry(other, _name_digit.match(other.name))
        if pname not in dir(self):
            self.__dict__[pname] = param
            self._added_names_.add(pname)
        else:
            if pname in self._added_names_:
                other = self.__dict__[pname]

    def _parameters_changed_notification(self, me, which=None):
        """
        In Parameterizable we just need to make sure that the next call to optimizer_array
        will update the optimizer_array to the latest parameters.
        """
        self._optimizer_copy_transformed = False
        self.parameters_changed()

515 """
516 Notify all parameters that the parent has changed
517 """
518 for p in self.parameters:
519 p._parent_changed(self)
520
522 """
523 This method gets called when parameters have changed.
524 Another way of listening to param changes is to
525 add self as a listener to the param, such that
526 updates get passed through. See :py:function:``paramz.param.Observable.add_observer``
527 """
528 pass
529
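    # Typical override in a user model (a sketch; ``MyModel`` and its attributes are
    # hypothetical):
    #
    #     class MyModel(Parameterized):
    #         def parameters_changed(self):
    #             # recompute everything that depends on the current parameter values
    #             self._cached_value = self.some_param.values.sum()
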
    def save(self, filename, ftype='HDF5'):
        """
        Save all the model parameters into a file (HDF5 by default).

        This is not supported yet. We are working on having a consistent,
        human readable way of saving and loading GPy models. This only
        saves the parameter array to an HDF5 file. In order
        to load the model again, use the same script for building the model
        you used to build this model. Then load the param array from this HDF5
        file and set the parameters of the created model:

        >>> m[:] = h5_file['param_array']

        This is less than optimal; we are working on a better solution.
        """
        from ..param import Param

        def gather_params(self, plist):
            if isinstance(self, Param):
                plist.append(self)
        plist = []
        self.traverse(gather_params, plist)
        names = self.parameter_names(adjust_for_printing=True)
        if ftype == 'HDF5':
            try:
                import h5py
                f = h5py.File(filename, 'w')
                for p, n in zip(plist, names):
                    n = n.replace('.', '_')
                    p = p.values
                    d = f.create_dataset(n, p.shape, dtype=p.dtype)
                    d[:] = p
                if hasattr(self, 'param_array'):
                    d = f.create_dataset('param_array', self.param_array.shape, dtype=self.param_array.dtype)
                    d[:] = self.param_array
                f.close()
            except Exception as e:
                raise IOError('Failed to write the parameters into an HDF5 file!') from e

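    # Loading sketch following the docstring above (assumes h5py is installed and the
    # model ``m`` has been rebuilt with the same script that created it):
    #
    #     import h5py
    #     with h5py.File('model.h5', 'r') as h5_file:
    #         m.update_model(False)
    #         m[:] = h5_file['param_array'][:]
    #         m.update_model(True)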