Error message

If you are not sure which of the other forums your question belongs in, then this forum for general questions is definitely the right place.
Romaxx
User
Posts: 62
Registered: Thursday, 26 January 2017, 18:53

Error message

Post by Romaxx » Sunday, 12 February 2017, 15:36

Hello everyone,

I am getting the following error:

Code: Select all

 File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 888, in debugfile
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
    debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir))
  File "C:\Program Files\Anaconda2\lib\bdb.py", line 400, in run
    exec cmd in globals, locals
  File "<string>", line 1, in <module>
  File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 866, in runfile
    execfile(filename, namespace)
  File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 87, in execfile
    exec(compile(scripttext, filename, 'exec'), glob, loc)
  File "c:/users/flo9fe/desktop/vssgp_lvm/vssgp_example.py", line 50, in <module>
    options={'ftol': 0, 'disp': False, 'maxiter': 500}, tol=0, callback=vssgp_opt.callback)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\_minimize.py", line 450, in minimize
    callback=callback, **options)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\lbfgsb.py", line 328, in _minimize_lbfgsb
    f, g = func_and_grad(x)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\lbfgsb.py", line 278, in func_and_grad
    f = fun(x, *args)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\optimize.py", line 292, in function_wrapper
    return function(*(wrapper_args + args))
  File "vssgp_opt.py", line 53, in func
    LL = sum(pool.map_async(eval_f_LL, arguments).get(9999999))
AttributeError: 'NoneType' object has no attribute 'map_async'


The corresponding function is shown below; the error occurs at line 53:

  1. import numpy as np
  2. from vssgp_model import VSSGP
  3. import pylab
  4. import multiprocessing
  5. def extend(x, y, z = {}):
  6.     return dict(x.items() + y.items() + z.items())
  7. pool, global_f, global_g = None, None, None
  8. def eval_f_LL(X, Y, params):
  9.     return global_f['LL'](**extend({'X': X, 'Y': Y}, params))
  10. def eval_g_LL(name, X, Y, params):
  11.     return global_g[name]['LL'](**extend({'X': X, 'Y': Y}, params))
  12.  
  13. class VSSGP_opt():
  14.     def __init__(self, N, Q, D, K, inputs, opt_params, fixed_params, use_exact_A = False, test_set = {},
  15.                  parallel = False, batch_size = None, components = None, print_interval = None):
  16.         self.vssgp, self.N, self.Q, self.K, self.fixed_params = VSSGP(use_exact_A), N, Q, K, fixed_params
  17.         self.use_exact_A, self.parallel, self.batch_size = use_exact_A, parallel, batch_size
  18.         self.inputs, self.test_set = inputs, test_set
  19.         self.print_interval = 10 if print_interval is None else print_interval
  20.         self.opt_param_names = [n for n,_ in opt_params.iteritems()]
  21.         opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
  22.         self.shapes = [v.shape for v in opt_param_values]
  23.         self.sizes = [sum([np.prod(x) for x in self.shapes[:i]]) for i in xrange(len(self.shapes)+1)]
  24.         self.components = opt_params['lSigma'].shape[2] if components is None else components
  25.         self.colours = [np.random.rand(3,1) for c in xrange(self.components)]
  26.         self.callback_counter = [0]
  27.         if batch_size is not None:
  28.             if parallel:
  29.                 global pool, global_f, global_g
  30.                 global_f, global_g = self.vssgp.f, self.vssgp.g
  31.                 if __name__ == '__main__':
  32.                     pool = multiprocessing.Pool(int(self.N / self.batch_size))
  33.             else:
  34.                 self.params = np.concatenate([v.flatten() for v in opt_param_values])
  35.                 self.param_updates = np.zeros_like(self.params)
  36.                 self.moving_mean_squared = np.zeros_like(self.params)
  37.                 self.learning_rates = 1e-2*np.ones_like(self.params)
  38.  
  39.  
  40.     def unpack(self, x):
  41.         x_param_values = [x[self.sizes[i-1]:self.sizes[i]].reshape(self.shapes[i-1]) for i in xrange(1,len(self.shapes)+1)]
  42.         params = {n:v for (n,v) in zip(self.opt_param_names, x_param_values)}
  43.         if 'ltau' in params:
  44.             params['ltau'] = params['ltau'].squeeze()
  45.         return params
  46.  
  47.     def func(self, x):
  48.         params = extend(self.fixed_params, self.unpack(x))
  49.         if self.batch_size is not None:
  50.             X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  51.             if self.parallel:
  52.                 arguments = [(X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  53.                 LL = sum(pool.map_async(eval_f_LL, arguments).get(9999999))
  54.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  55.             else:
  56.                 split = np.random.randint(splits)
  57.                 LL = self.N / self.batch_size * self.vssgp.f['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  58.                 print(LL)
  59.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  60.         else:
  61.             params = extend(self.inputs, params)
  62.             LL, KL = self.vssgp.f['LL'](**params), self.vssgp.f['KL'](**params)
  63.         return -(LL - KL)


What is wrong here?
BlackJack

Re: Error message

Post by BlackJack » Sunday, 12 February 2017, 16:09

@Romaxx: The use of global variables is what's not right. If you use the ``global`` keyword, you are doing something wrong in 99.9999% of all cases.

I have never seen the ``if __name__ == '__main__':`` test buried deep inside a function anywhere. You should stop doing that. The functions/methods of a module should behave the same whether the module is imported or executed as a program. Otherwise testing gets "fun", because under tests it then behaves differently than when the program is run.
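
Roughly what I mean, as a toy sketch with made-up names (this is not the demo's actual API): the worker function and the class live at module level, the pool is created only in the part that runs when the file is started as a program, and the object that needs the pool gets it passed in instead of reaching for a global.

Code: Select all

import multiprocessing

def square(x):
    # Worker function at module level so the worker processes can find it.
    return x * x

class Optimizer(object):
    def __init__(self, pool=None):
        # The pool is handed in; no ``global`` and no hidden __name__ check.
        self.pool = pool

    def evaluate(self, values):
        if self.pool is not None:
            return sum(self.pool.map(square, values))
        return sum(square(v) for v in values)

def main():
    pool = multiprocessing.Pool(2)
    try:
        print(Optimizer(pool=pool).evaluate(range(10)))
    finally:
        pool.close()
        pool.join()

if __name__ == '__main__':
    main()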
Romaxx
User
Posts: 62
Registered: Thursday, 26 January 2017, 18:53

Re: Error message

Post by Romaxx » Sunday, 12 February 2017, 16:47

Hello,

thanks for your reply.

I have now done the following:

  1. import numpy as np
  2. from vssgp_model import VSSGP
  3. import pylab
  4. import multiprocessing
  5. def extend(x, y, z = {}):
  6.     return dict(x.items() + y.items() + z.items())
  7. pool, global_f, global_g = None, None, None
  8. def eval_f_LL(X, Y, params):
  9.     return global_f['LL'](**extend({'X': X, 'Y': Y}, params))
  10. def eval_g_LL(name, X, Y, params):
  11.     return global_g[name]['LL'](**extend({'X': X, 'Y': Y}, params))
  12.  
  13. class VSSGP_opt():
  14.     def __init__(self, N, Q, D, K, inputs, opt_params, fixed_params, use_exact_A = False, test_set = {},
  15.                  parallel = False, batch_size = None, components = None, print_interval = None):
  16.         self.vssgp, self.N, self.Q, self.K, self.fixed_params = VSSGP(use_exact_A), N, Q, K, fixed_params
  17.         self.use_exact_A, self.parallel, self.batch_size = use_exact_A, parallel, batch_size
  18.         self.inputs, self.test_set = inputs, test_set
  19.         self.print_interval = 10 if print_interval is None else print_interval
  20.         self.opt_param_names = [n for n,_ in opt_params.iteritems()]
  21.         opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
  22.         self.shapes = [v.shape for v in opt_param_values]
  23.         self.sizes = [sum([np.prod(x) for x in self.shapes[:i]]) for i in xrange(len(self.shapes)+1)]
  24.         self.components = opt_params['lSigma'].shape[2] if components is None else components
  25.         self.colours = [np.random.rand(3,1) for c in xrange(self.components)]
  26.         self.callback_counter = [0]
  27.         if batch_size is not None:
  28.             if parallel:
  29.                 self.pool = multiprocessing.Pool(int(self.N / self.batch_size))
  30.             else:
  31.                 self.params = np.concatenate([v.flatten() for v in opt_param_values])
  32.                 self.param_updates = np.zeros_like(self.params)
  33.                 self.moving_mean_squared = np.zeros_like(self.params)
  34.                 self.learning_rates = 1e-2*np.ones_like(self.params)
  35.  
  36.  
  37.     def unpack(self, x):
  38.         x_param_values = [x[self.sizes[i-1]:self.sizes[i]].reshape(self.shapes[i-1]) for i in xrange(1,len(self.shapes)+1)]
  39.         params = {n:v for (n,v) in zip(self.opt_param_names, x_param_values)}
  40.         if 'ltau' in params:
  41.             params['ltau'] = params['ltau'].squeeze()
  42.         return params
  43.  
  44.     def func(self, x):
  45.         params = extend(self.fixed_params, self.unpack(x))
  46.         if self.batch_size is not None:
  47.             X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  48.             if self.parallel:
  49.                 arguments = [(X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  50.                 LL = sum(self.pool.map_async(eval_f_LL, arguments).get(9999999))
  51.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  52.             else:
  53.                 split = np.random.randint(splits)
  54.                 LL = self.N / self.batch_size * self.vssgp.f['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  55.                 print(LL)
  56.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  57.         else:
  58.             params = extend(self.inputs, params)
  59.             LL, KL = self.vssgp.f['LL'](**params), self.vssgp.f['KL'](**params)
  60.         return -(LL - KL)


And I get this error:

Code: Select all

RuntimeError:
            Attempt to start a new process before the current process
            has finished its bootstrapping phase.

            This probably means that you are on Windows and you have
            forgotten to use the proper idiom in the main module:

                if __name__ == '__main__':
                    freeze_support()
                    ...

            The "freeze_support()" line can be omitted if the program
            is not going to be frozen to produce a Windows executable.


When I put

  1.                 if __name__ == '__main__':


back in at the place in question, I get:

Code: Select all

File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 888, in debugfile
    debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir))
  File "C:\Program Files\Anaconda2\lib\bdb.py", line 400, in run
    exec cmd in globals, locals
  File "<string>", line 1, in <module>
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>


  File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 866, in runfile
    execfile(filename, namespace)
  File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 87, in execfile
    exec(compile(scripttext, filename, 'exec'), glob, loc)
  File "c:/users/flo9fe/desktop/vssgp_lvm/vssgp_example.py", line 50, in <module>
    options={'ftol': 0, 'disp': False, 'maxiter': 500}, tol=0, callback=vssgp_opt.callback)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\_minimize.py", line 450, in minimize
    callback=callback, **options)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\lbfgsb.py", line 328, in _minimize_lbfgsb
    f, g = func_and_grad(x)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\lbfgsb.py", line 278, in func_and_grad
    f = fun(x, *args)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\optimize.py", line 292, in function_wrapper
    return function(*(wrapper_args + args))
  File "vssgp_opt.py", line 52, in func
    LL = sum(self.pool.map_async(eval_f_LL, arguments).get(9999999))
AttributeError: VSSGP_opt instance has no attribute 'pool'
BlackJack

Re: Error message

Post by BlackJack » Sunday, 12 February 2017, 17:03

@Romaxx: What is the "aforementioned" place? Besides, you are still using global data structures that will not exist in the other processes.
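
To illustrate with a toy example (made-up names, not your code): on Windows each worker process starts with a fresh copy of the module, so a global that you assign in the parent process simply does not exist over there. State that the workers need either has to travel with the call arguments or has to be set up in every worker, for example via the ``initializer`` hook of ``multiprocessing.Pool``. If the objects cannot be pickled (compiled functions, say), the initializer could presumably build them inside the worker instead of receiving them; that is still module-level state, but at least it is set up explicitly per worker.

Code: Select all

import multiprocessing

_worker_data = None  # module-level state, set separately in every worker process

def init_worker(data):
    # Runs once in each worker right after it starts; the usual pattern
    # for per-worker state that the mapped function needs.
    global _worker_data
    _worker_data = data

def scaled(i):
    return _worker_data[i] * 10

def main():
    data = [1, 2, 3, 4]
    pool = multiprocessing.Pool(2, initializer=init_worker, initargs=(data,))
    try:
        print(pool.map(scaled, range(len(data))))
    finally:
        pool.close()
        pool.join()

if __name__ == '__main__':
    main()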
Romaxx
User
Posts: 62
Registered: Thursday, 26 January 2017, 18:53

Re: Error message

Post by Romaxx » Sunday, 12 February 2017, 17:26

Sorry for being imprecise.
I mean

  1. if __name__ == '__main__':


reinserted at lines 28/29.

I have now changed my file to the following:

  1. import numpy as np
  2. from vssgp_model import VSSGP
  3. import pylab
  4. import multiprocessing
  5. def extend(x, y, z = {}):
  6.     return dict(x.items() + y.items() + z.items())
  7. def eval_f_LL(X, Y, params):
  8.     return VSSGP.f['LL'](**extend({'X': X, 'Y': Y}, params))
  9. def eval_g_LL(name, X, Y, params):
  10.     return VSSGP.g[name]['LL'](**extend({'X': X, 'Y': Y}, params))
  11.  
  12. class VSSGP_opt():
  13.     def __init__(self, N, Q, D, K, inputs, opt_params, fixed_params, use_exact_A = False, test_set = {},
  14.                  parallel = False, batch_size = None, components = None, print_interval = None):
  15.         self.vssgp, self.N, self.Q, self.K, self.fixed_params = VSSGP(use_exact_A), N, Q, K, fixed_params
  16.         self.use_exact_A, self.parallel, self.batch_size = use_exact_A, parallel, batch_size
  17.         self.inputs, self.test_set = inputs, test_set
  18.         self.print_interval = 10 if print_interval is None else print_interval
  19.         self.opt_param_names = [n for n,_ in opt_params.iteritems()]
  20.         opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
  21.         self.shapes = [v.shape for v in opt_param_values]
  22.         self.sizes = [sum([np.prod(x) for x in self.shapes[:i]]) for i in xrange(len(self.shapes)+1)]
  23.         self.components = opt_params['lSigma'].shape[2] if components is None else components
  24.         self.colours = [np.random.rand(3,1) for c in xrange(self.components)]
  25.         self.callback_counter = [0]
  26.         if batch_size is not None:
  27.             if parallel:
  28.                 self.pool = multiprocessing.Pool(int(self.N / self.batch_size))
  29.             else:
  30.                 self.params = np.concatenate([v.flatten() for v in opt_param_values])
  31.                 self.param_updates = np.zeros_like(self.params)
  32.                 self.moving_mean_squared = np.zeros_like(self.params)
  33.                 self.learning_rates = 1e-2*np.ones_like(self.params)
  34.  
  35.  
  36.     def unpack(self, x):
  37.         x_param_values = [x[self.sizes[i-1]:self.sizes[i]].reshape(self.shapes[i-1]) for i in xrange(1,len(self.shapes)+1)]
  38.         params = {n:v for (n,v) in zip(self.opt_param_names, x_param_values)}
  39.         if 'ltau' in params:
  40.             params['ltau'] = params['ltau'].squeeze()
  41.         return params
  42.  
  43.     def func(self, x):
  44.         params = extend(self.fixed_params, self.unpack(x))
  45.         if self.batch_size is not None:
  46.             X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  47.             if self.parallel:
  48.                 arguments = [(X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  49.                 LL = sum(self.pool.map_async(eval_f_LL, arguments).get(9999999))
  50.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  51.             else:
  52.                 split = np.random.randint(splits)
  53.                 LL = self.N / self.batch_size * self.vssgp.f['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  54.                 print(LL)
  55.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  56.         else:
  57.             params = extend(self.inputs, params)
  58.             LL, KL = self.vssgp.f['LL'](**params), self.vssgp.f['KL'](**params)
  59.         return -(LL - KL)


However, without

  1. if __name__ == '__main__':


at lines 28/29 I again get the error

Code: Select all

RuntimeError:
            Attempt to start a new process before the current process
            has finished its bootstrapping phase.

            This probably means that you are on Windows and you have
            forgotten to use the proper idiom in the main module:

                if __name__ == '__main__':
                    freeze_support()
                    ...

            The "freeze_support()" line can be omitted if the program
            is not going to be frozen to produce a Windows executable.


and with it in place:

Code: Select all

File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 888, in debugfile
    debugger.run("runfile(%r, args=%r, wdir=%r)" % (filename, args, wdir))
  File "C:\Program Files\Anaconda2\lib\bdb.py", line 400, in run
    exec cmd in globals, locals
  File "<string>", line 1, in <module>
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>


  File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 866, in runfile
    execfile(filename, namespace)
  File "C:\Program Files\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 87, in execfile
    exec(compile(scripttext, filename, 'exec'), glob, loc)
  File "c:/users/flo9fe/desktop/vssgp_lvm/vssgp_example.py", line 50, in <module>
    options={'ftol': 0, 'disp': False, 'maxiter': 500}, tol=0, callback=vssgp_opt.callback)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\_minimize.py", line 450, in minimize
    callback=callback, **options)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\lbfgsb.py", line 328, in _minimize_lbfgsb
    f, g = func_and_grad(x)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\lbfgsb.py", line 278, in func_and_grad
    f = fun(x, *args)
  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\optimize.py", line 292, in function_wrapper
    return function(*(wrapper_args + args))
  File "vssgp_opt.py", line 52, in func
    LL = sum(self.pool.map_async(eval_f_LL, arguments).get(9999999))
AttributeError: VSSGP_opt instance has no attribute 'pool'


I should add that this is not my code, but I would like to get it running, since it is a demo (most likely optimized for Linux).

Regards
BlackJack

Re: Error message

Post by BlackJack » Sunday, 12 February 2017, 17:32

@Romaxx: The error message (and the documentation of `multiprocessing`) says that the main module, i.e. the one that is executed as the program, has to be guarded this way. Which you should do anyway, even when you are not using multiprocessing.

And I also did not mean that you should simply delete the ``if`` line, which of course changes the program's behaviour, but that one would not write it like this as a whole. At least I wouldn't.
Romaxx
User
Posts: 62
Registered: Thursday, 26 January 2017, 18:53

Re: Error message

Post by Romaxx » Sunday, 12 February 2017, 17:43

OK, let's forget my changes for a moment.
I have the following file:

  1. import numpy as np
  2. from vssgp_model import VSSGP
  3. import multiprocessing
  4. def extend(x, y, z = {}):
  5.     return dict(x.items() + y.items() + z.items())
  6. pool, global_f, global_g = None, None, None
  7. def eval_f_LL(X, Y, params):
  8.     return global_f['LL'](**extend({'X': X, 'Y': Y}, params))
  9. def eval_g_LL(name, X, Y, params):
  10.     return global_g[name]['LL'](**extend({'X': X, 'Y': Y}, params))
  11.  
  12. class VSSGP_opt():
  13.     def __init__(self, N, Q, D, K, inputs, opt_params, fixed_params, use_exact_A = False, test_set = {},
  14.                  parallel = False, batch_size = None, components = None, print_interval = None):
  15.         self.vssgp, self.N, self.Q, self.K, self.fixed_params = VSSGP(use_exact_A), N, Q, K, fixed_params
  16.         self.use_exact_A, self.parallel, self.batch_size = use_exact_A, parallel, batch_size
  17.         self.inputs, self.test_set = inputs, test_set
  18.         self.print_interval = 10 if print_interval is None else print_interval
  19.         self.opt_param_names = [n for n,_ in opt_params.iteritems()]
  20.         opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
  21.         self.shapes = [v.shape for v in opt_param_values]
  22.         self.sizes = [sum([np.prod(x) for x in self.shapes[:i]]) for i in xrange(len(self.shapes)+1)]
  23.         self.components = opt_params['lSigma'].shape[2] if components is None else components
  24.         self.colours = [np.random.rand(3,1) for c in xrange(self.components)]
  25.         self.callback_counter = [0]
  26.         if 'train_ind' not in test_set:
  27.             print('train_ind not found!')
  28.             self.test_set['train_ind'] = np.arange(inputs['X'].shape[0]).astype(int)
  29.             self.test_set['test_ind'] = np.arange(0).astype(int)
  30.         if batch_size is not None:
  31.             if parallel:
  32.                 global pool, global_f, global_g
  33.                 global_f, global_g = self.vssgp.f, self.vssgp.g
  34.                 pool = multiprocessing.Pool(int(self.N / self.batch_size))
  35.             else:
  36.                 self.params = np.concatenate([v.flatten() for v in opt_param_values])
  37.                 self.param_updates = np.zeros_like(self.params)
  38.                 self.moving_mean_squared = np.zeros_like(self.params)
  39.                 self.learning_rates = 1e-2*np.ones_like(self.params)
  40.  
  41.  
  42.     def unpack(self, x):
  43.         x_param_values = [x[self.sizes[i-1]:self.sizes[i]].reshape(self.shapes[i-1]) for i in xrange(1,len(self.shapes)+1)]
  44.         params = {n:v for (n,v) in zip(self.opt_param_names, x_param_values)}
  45.         if 'ltau' in params:
  46.             params['ltau'] = params['ltau'].squeeze()
  47.         return params
  48.  
  49.     def func(self, x):
  50.         params = extend(self.fixed_params, self.unpack(x))
  51.         if self.batch_size is not None:
  52.             X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  53.             if self.parallel:
  54.                 arguments = [(X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  55.                 LL = sum(pool.map_async(eval_f_LL, arguments).get(9999999))
  56.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  57.             else:
  58.                 split = np.random.randint(splits)
  59.                 LL = self.N / self.batch_size * self.vssgp.f['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  60.                 print(LL)
  61.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  62.         else:
  63.             params = extend(self.inputs, params)
  64.             LL, KL = self.vssgp.f['LL'](**params), self.vssgp.f['KL'](**params)
  65.         return -(LL - KL)
  66.  
  67.     def fprime(self, x):
  68.         grads, params = [], extend(self.fixed_params, self.unpack(x))
  69.         for n in self.opt_param_names:
  70.             if self.batch_size is not None:
  71.                 X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  72.                 if self.parallel:
  73.                     arguments = [(n, X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  74.                     dLL = sum(pool.map_async(eval_g_LL, arguments).get(9999999))
  75.                     dKL = self.vssgp.g[n]['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  76.                 else:
  77.                     split = np.random.randint(splits)
  78.                     dLL = self.N / self.batch_size * self.vssgp.g[n]['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  79.                     dKL = self.vssgp.g[n]['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  80.             else:
  81.                 params = extend(self.inputs, params)
  82.                 dLL, dKL = self.vssgp.g[n]['LL'](**params), self.vssgp.g[n]['KL'](**params)
  83.             grads += [-(dLL - dKL)]
  84.         return np.concatenate([grad.flatten() for grad in grads])
  85.  
  86.     def callback(self, x):
  87.         if self.callback_counter[0]%self.print_interval == 0:
  88.             opt_params = self.unpack(x)
  89.             params = extend(self.inputs, self.fixed_params, opt_params)
  90.             LL = self.vssgp.f['LL'](**params)
  91.             KL = self.vssgp.f['KL'](**params)
  92.             print(LL - KL)
  93.         self.callback_counter[0] += 1


This file merely receives the functions to be executed via self.vssgp.f and self.vssgp.g.

How do I change this file so that it runs in parallel?

Can you help me here?

I would like to get it running under Windows.

The complete function package for the demo can be found here: https://github.com/yaringal/VSSGP

although really only the file quoted here matters for the parallelization.
Romaxx
User
Posts: 62
Registered: Thursday, 26 January 2017, 18:53

Re: Error message

Post by Romaxx » Sunday, 12 February 2017, 21:09

Can you at least tell me how you would write it?

Maybe then I will get a better feel for how to rewrite it.
BlackJack

Re: Error message

Post by BlackJack » Monday, 13 February 2017, 00:35

@Romaxx: I would write it the way the `multiprocessing` API demands. The module that is executed as the program must be importable without side effects. That is *not fulfilled at all* here. All of the code simply sits at module level. That code belongs in a function, which is then guarded with the ``if __name__ == '__main__':`` idiom.

If it still does not work after that, I would either test it under Linux first or ask the author of the code.
Romaxx
User
Posts: 62
Registered: Thursday, 26 January 2017, 18:53

Re: Error message

Post by Romaxx » Monday, 13 February 2017, 10:38

Hello,

thanks for your reply.

I find it hard to follow you. What do you mean by 'import without side effects'? Unfortunately I am not familiar with such terms.
And 'that code belongs in a function, which is then guarded with the ``if __name__ == '__main__':`` idiom'.
Which code do you mean here? This one, perhaps: pool = multiprocessing.Pool(int(self.N / self.batch_size)).
So you mean:

  1. class VSSGP_opt():
  2.     def __init__(self, N, Q, D, K, inputs, opt_params, fixed_params, use_exact_A = False, test_set = {},
  3.                  parallel = False, batch_size = None, components = None, print_interval = None):
  4.         self.vssgp, self.N, self.Q, self.K, self.fixed_params = VSSGP(use_exact_A), N, Q, K, fixed_params
  5.         self.use_exact_A, self.parallel, self.batch_size = use_exact_A, parallel, batch_size
  6.         self.inputs, self.test_set = inputs, test_set
  7.         self.print_interval = 10 if print_interval is None else print_interval
  8.         self.opt_param_names = [n for n,_ in opt_params.iteritems()]
  9.         opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
  10.         self.shapes = [v.shape for v in opt_param_values]
  11.         self.sizes = [sum([np.prod(x) for x in self.shapes[:i]]) for i in xrange(len(self.shapes)+1)]
  12.         self.components = opt_params['lSigma'].shape[2] if components is None else components
  13.         self.colours = [np.random.rand(3,1) for c in xrange(self.components)]
  14.         self.callback_counter = [0]
  15.         if batch_size is not None:
  16.             if parallel:
  17.                 global pool, global_f, global_g
  18.                 global_f, global_g = self.vssgp.f, self.vssgp.g
  19.                 if __name__ == '__main__':
  20.                     multiprocessing.freeze_support()
  21.                     pool = multiprocessing.Pool(int(self.N / self.batch_size))
  22.             else:
  23.                 self.params = np.concatenate([v.flatten() for v in opt_param_values])
  24.                 self.param_updates = np.zeros_like(self.params)
  25.                 self.moving_mean_squared = np.zeros_like(self.params)
  26.                 self.learning_rates = 1e-2*np.ones_like(self.params)
  27.     def multiprocess(self):
  28.                 if __name__ == '__main__':
  29.                     pool = multiprocessing.Pool(int(self.N / self.batch_size))
  30.              return (pool)


And then call 'pool' under 'def func(self, x):'?

  1. def func(self, x):
  2.         params = extend(self.fixed_params, self.unpack(x))
  3.         if self.batch_size is not None:
  4.             X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  5.             if self.parallel:
  6.                 arguments = [(X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  7.                 pool = self.multiprocess()
  8.                 LL = sum(pool.map_async(eval_f_LL, arguments).get(9999999))
  9.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  10.             else:
  11.                 split = np.random.randint(splits)
  12.                 LL = self.N / self.batch_size * self.vssgp.f['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  13.                 print LL
  14.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  15.         else:
  16.             params = extend(self.inputs, params)
  17.             LL, KL = self.vssgp.f['LL'](**params), self.vssgp.f['KL'](**params)
  18.         return -(LL - KL)


The problem is that when I implement it this way, program flow never passes through 'if __name__ == '__main__':', so I cannot return a 'pool' from 'multiprocess'.
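
(A tiny test file of my own, hypothetical name, shows what I mean: when a module is imported, ``__name__`` is the module's name and not ``'__main__'``, so the guard inside the method never triggers.)

Code: Select all

# whoami.py -- just a check of my own, not part of the demo
print('__name__ is %r' % __name__)

if __name__ == '__main__':
    print('started as a program, the guarded code runs')
else:
    print('imported as a module, the guarded code is skipped')

Running ``python whoami.py`` prints the first branch; ``import whoami`` from another file only ever reaches the second.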
BlackJack

Re: Error message

Post by BlackJack » Monday, 13 February 2017, 11:11

@Romaxx: I mean the code in the module that is executed as the program. All of it. Importing without side effects means that you can import a module without anything happening (apart from constants, functions, and classes being defined). In a clean program this should hold for every module. With `multiprocessing` it is, as you can see, very important. But it also matters for testing, automated or manual during debugging, and for some tools, for example documentation generation from the code: it is important that a module can be imported without some larger program running, let alone files or database connections being opened, hardware being accessed, external processes being started, and so on.

So if you change into the directory, start a Python shell, and type ``import VSSGP_example`` there, nothing may happen beyond the module being imported and, where applicable, constants, functions, and classes being defined in it. This holds transitively, i.e. modules imported as a consequence of that import must not have any further effects either. This is a basic requirement of the `multiprocessing` module, at least on platforms that do not have a Unix-style `fork()`. On such platforms, new processes are started for the multiprocessing, and the module that was started as the program is imported in those processes in order to provide as similar an "environment" as possible.
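
A small stand-alone example (made-up file name, not from the demo) makes that behaviour visible: the module-level ``print`` runs again in every worker process, because each worker imports the main module anew.

Code: Select all

# spawn_demo.py -- toy example only
import multiprocessing
import os

# Module-level code: on Windows this line runs once in the parent process
# *and* again in every worker process, because each worker re-imports the module.
print('importing spawn_demo in process %d' % os.getpid())

def work(x):
    return x + 1

if __name__ == '__main__':
    pool = multiprocessing.Pool(2)
    try:
        print(pool.map(work, [1, 2, 3]))
    finally:
        pool.close()
        pool.join()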

So at the very least everything from line 9 (inclusive) onwards in that module belongs in a function that is only called when the module is executed as a program.
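
Schematically, and with a toy objective standing in for the real setup (I am only guessing at the contents of `vssgp_example.py`, so treat the names as placeholders), the shape would be something like this: nothing happens at import time, the whole run lives in one function.

Code: Select all

# vssgp_example.py, schematically -- a toy objective stands in for VSSGP_opt
import numpy as np
from scipy.optimize import minimize

def toy_objective(x):
    # Placeholder for vssgp_opt.func from the demo.
    return float(np.sum(x ** 2))

def main():
    # The real script would build its inputs and the VSSGP_opt object here,
    # inside main(), instead of at module level.
    x0 = np.ones(5)
    result = minimize(toy_objective, x0, method='L-BFGS-B',
                      options={'ftol': 0, 'disp': False, 'maxiter': 500})
    print(result.x)

if __name__ == '__main__':
    main()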
Romaxx
User
Posts: 62
Registered: Thursday, 26 January 2017, 18:53

Re: Error message

Post by Romaxx » Monday, 13 February 2017, 11:24

So you mean vssgp_model.f.
But in the following file this function is merely

  1. import numpy as np
  2. from vssgp_model import VSSGP
  3. import multiprocessing
  4. def extend(x, y, z = {}):
  5.     return dict(x.items() + y.items() + z.items())
  6. global_f, global_g = None, None
  7. def eval_f_LL((X, Y, params)):
  8.     return global_f['LL'](**extend({'X': X, 'Y': Y}, params))
  9. def eval_g_LL((name, X, Y, params)):
  10.     return global_g[name]['LL'](**extend({'X': X, 'Y': Y}, params))
  11.  
  12. class VSSGP_opt():
  13.     def __init__(self, N, Q, D, K, inputs, opt_params, fixed_params, use_exact_A = False, test_set = {},
  14.                  parallel = False, batch_size = None, components = None, print_interval = None):
  15.         self.vssgp, self.N, self.Q, self.K, self.fixed_params = VSSGP(use_exact_A), N, Q, K, fixed_params
  16.         self.use_exact_A, self.parallel, self.batch_size = use_exact_A, parallel, batch_size
  17.         self.inputs, self.test_set = inputs, test_set
  18.         self.print_interval = 10 if print_interval is None else print_interval
  19.         self.opt_param_names = [n for n,_ in opt_params.iteritems()]
  20.         opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
  21.         self.shapes = [v.shape for v in opt_param_values]
  22.         self.sizes = [sum([np.prod(x) for x in self.shapes[:i]]) for i in xrange(len(self.shapes)+1)]
  23.         self.components = opt_params['lSigma'].shape[2] if components is None else components
  24.         self.colours = [np.random.rand(3,1) for c in xrange(self.components)]
  25.         self.callback_counter = [0]
  26.         if batch_size is not None:
  27.             if parallel:
  28.                 global global_f, global_g
  29.                 global_f, global_g = self.vssgp.f, self.vssgp.g
  30.             else:
  31.                 self.params = np.concatenate([v.flatten() for v in opt_param_values])
  32.                 self.param_updates = np.zeros_like(self.params)
  33.                 self.moving_mean_squared = np.zeros_like(self.params)
  34.                 self.learning_rates = 1e-2*np.ones_like(self.params)
  35.                
  36.     def multiprocess(self):
  37.         if __name__ == '__main__':
  38.             pool = multiprocessing.Pool(int(self.N / self.batch_size))
  39.         return (pool)
  40.  
  41.  
  42.     def unpack(self, x):
  43.         x_param_values = [x[self.sizes[i-1]:self.sizes[i]].reshape(self.shapes[i-1]) for i in xrange(1,len(self.shapes)+1)]
  44.         params = {n:v for (n,v) in zip(self.opt_param_names, x_param_values)}
  45.         if 'ltau' in params:
  46.             params['ltau'] = params['ltau'].squeeze()
  47.         return params
  48.  
  49.     def func(self, x):
  50.         params = extend(self.fixed_params, self.unpack(x))
  51.         if self.batch_size is not None:
  52.             X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  53.             if self.parallel:
  54.                 arguments = [(X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  55.                 pool = self.multiprocess()
  56.                 LL = sum(pool.map_async(eval_f_LL, arguments).get(9999999))
  57.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  58.             else:
  59.                 split = np.random.randint(splits)
  60.                 LL = self.N / self.batch_size * self.vssgp.f['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  61.                 print LL
  62.                 KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  63.         else:
  64.             params = extend(self.inputs, params)
  65.             LL, KL = self.vssgp.f['LL'](**params), self.vssgp.f['KL'](**params)
  66.         return -(LL - KL)


loaded in lines 2, 15 and 29, and not executed.
The problem with this function vssgp_model.f is that it is a Theano-compiled function, built to compute e.g. the gradient or the function value efficiently, i.e. making changes there is probably not easy.
BlackJack

Re: Error message

Post by BlackJack » Monday, 13 February 2017, 13:19

@Romaxx: Why would I mean ``vssgp_model.f``? What happens (or happened) after the change that is needed to protect the code in `VSSGP_example` from being executed on import?
Kebap
User
Posts: 345
Registered: Tuesday, 15 November 2011, 14:20
Location: Dortmund

Re: Error message

Post by Kebap » Tuesday, 14 February 2017, 12:06

Apparently some basics on the topic of importing Python modules are missing here.
MorgenGrauen: 1 world, >12 guilds, >85 adventures, >1000 weapons and armours,
>2500 NPCs, >16000 rooms, >170 volunteer programmers, plain text, since 1992.
Romaxx
User
Posts: 62
Registered: Thursday, 26 January 2017, 18:53

Re: Error message

Post by Romaxx » Wednesday, 15 February 2017, 11:27

OK, now I am a bit confused as well.
Can we start over from the beginning?
I have now read through the link from Kebap and hope that, with some support, I can get further with it.

So, I have the following module for optimizing a function with Theano.

  1.     import numpy as np
  2.     from vssgp_model import VSSGP
  3.     import multiprocessing
  4.     def extend(x, y, z = {}):
  5.         return dict(x.items() + y.items() + z.items())
  6.     pool, global_f, global_g = None, None, None
  7.     def eval_f_LL(X, Y, params):
  8.         return global_f['LL'](**extend({'X': X, 'Y': Y}, params))
  9.     def eval_g_LL(name, X, Y, params):
  10.         return global_g[name]['LL'](**extend({'X': X, 'Y': Y}, params))
  11.      
  12.     class VSSGP_opt():
  13.         def __init__(self, N, Q, D, K, inputs, opt_params, fixed_params, use_exact_A = False, test_set = {},
  14.                      parallel = False, batch_size = None, components = None, print_interval = None):
  15.             self.vssgp, self.N, self.Q, self.K, self.fixed_params = VSSGP(use_exact_A), N, Q, K, fixed_params
  16.             self.use_exact_A, self.parallel, self.batch_size = use_exact_A, parallel, batch_size
  17.             self.inputs, self.test_set = inputs, test_set
  18.             self.print_interval = 10 if print_interval is None else print_interval
  19.             self.opt_param_names = [n for n,_ in opt_params.iteritems()]
  20.             opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
  21.             self.shapes = [v.shape for v in opt_param_values]
  22.             self.sizes = [sum([np.prod(x) for x in self.shapes[:i]]) for i in xrange(len(self.shapes)+1)]
  23.             self.components = opt_params['lSigma'].shape[2] if components is None else components
  24.             self.colours = [np.random.rand(3,1) for c in xrange(self.components)]
  25.             self.callback_counter = [0]
  26.             if 'train_ind' not in test_set:
  27.                 print('train_ind not found!')
  28.                 self.test_set['train_ind'] = np.arange(inputs['X'].shape[0]).astype(int)
  29.                 self.test_set['test_ind'] = np.arange(0).astype(int)
  30.             if batch_size is not None:
  31.                 if parallel:
  32.                     global pool, global_f, global_g
  33.                     global_f, global_g = self.vssgp.f, self.vssgp.g
  34.                     pool = multiprocessing.Pool(int(self.N / self.batch_size))
  35.                 else:
  36.                     self.params = np.concatenate([v.flatten() for v in opt_param_values])
  37.                     self.param_updates = np.zeros_like(self.params)
  38.                     self.moving_mean_squared = np.zeros_like(self.params)
  39.                     self.learning_rates = 1e-2*np.ones_like(self.params)
  40.      
  41.      
  42.         def unpack(self, x):
  43.             x_param_values = [x[self.sizes[i-1]:self.sizes[i]].reshape(self.shapes[i-1]) for i in xrange(1,len(self.shapes)+1)]
  44.             params = {n:v for (n,v) in zip(self.opt_param_names, x_param_values)}
  45.             if 'ltau' in params:
  46.                 params['ltau'] = params['ltau'].squeeze()
  47.             return params
  48.      
  49.         def func(self, x):
  50.             params = extend(self.fixed_params, self.unpack(x))
  51.             if self.batch_size is not None:
  52.                 X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  53.                 if self.parallel:
  54.                     arguments = [(X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  55.                     LL = sum(pool.map_async(eval_f_LL, arguments).get(9999999))
  56.                     KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  57.                 else:
  58.                     split = np.random.randint(splits)
  59.                     LL = self.N / self.batch_size * self.vssgp.f['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  60.                     print(LL)
  61.                     KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  62.             else:
  63.                 params = extend(self.inputs, params)
  64.                 LL, KL = self.vssgp.f['LL'](**params), self.vssgp.f['KL'](**params)
  65.             return -(LL - KL)
  66.      
  67.         def fprime(self, x):
  68.             grads, params = [], extend(self.fixed_params, self.unpack(x))
  69.             for n in self.opt_param_names:
  70.                 if self.batch_size is not None:
  71.                     X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  72.                     if self.parallel:
  73.                         arguments = [(n, X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  74.                         dLL = sum(pool.map_async(eval_g_LL, arguments).get(9999999))
  75.                         dKL = self.vssgp.g[n]['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  76.                     else:
  77.                         split = np.random.randint(splits)
  78.                         dLL = self.N / self.batch_size * self.vssgp.g[n]['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  79.                         dKL = self.vssgp.g[n]['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  80.                 else:
  81.                     params = extend(self.inputs, params)
  82.                     dLL, dKL = self.vssgp.g[n]['LL'](**params), self.vssgp.g[n]['KL'](**params)
  83.                 grads += [-(dLL - dKL)]
  84.             return np.concatenate([grad.flatten() for grad in grads])
  85.      
  86.         def callback(self, x):
  87.             if self.callback_counter[0]%self.print_interval == 0:
  88.                 opt_params = self.unpack(x)
  89.                 params = extend(self.inputs, self.fixed_params, opt_params)
  90.                 LL = self.vssgp.f['LL'](**params)
  91.                 KL = self.vssgp.f['KL'](**params)
  92.                 print(LL - KL)
  93.             self.callback_counter[0] += 1


On the first run, Theano compiles the code of the function I want to optimize, and via vssgp_model.f and vssgp_model.g I can evaluate the function value and the gradient of that function, respectively (with some input, of course, e.g. vssgp_model.f['LL'](**params); params is a list of variables).

As you can see, in lines 33/34 there is no

  1. if __name__ == '__main__':


to be found. This is the demo code, i.e. I have not changed anything here. In my very first post I did have it in, precisely because I had read in the multiprocessing docs that you are actually supposed to add it. But doing that in a straightforward way then went wrong, as was almost to be expected.

I believe, please correct me if I am wrong, that with the following

  1.     import numpy as np
  2.     from vssgp_model import VSSGP
  3.     import pylab
  4.     import multiprocessing
  5.     def extend(x, y, z = {}):
  6.         return dict(x.items() + y.items() + z.items())
  7.     def eval_f_LL(X, Y, params):
  8.         out_f = VSSGP.f['LL'](**extend({'X': X, 'Y': Y}, params))
  9.         return out_f
  10.     def eval_g_LL(name, X, Y, params):
  11.         out_g = VSSGP.f['LL'](**extend({'X': X, 'Y': Y}, params))
  12.         return out_g
  13.      
  14.     class VSSGP_opt():
  15.         def __init__(self, N, Q, D, K, inputs, opt_params, fixed_params, use_exact_A = False, test_set = {},
  16.                      parallel = False, batch_size = None, components = None, print_interval = None):
  17.             self.vssgp, self.N, self.Q, self.K, self.fixed_params = VSSGP(use_exact_A), N, Q, K, fixed_params
  18.             self.use_exact_A, self.parallel, self.batch_size = use_exact_A, parallel, batch_size
  19.             self.inputs, self.test_set = inputs, test_set
  20.             self.print_interval = 10 if print_interval is None else print_interval
  21.             self.opt_param_names = [n for n,_ in opt_params.iteritems()]
  22.             opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
  23.             self.shapes = [v.shape for v in opt_param_values]
  24.             self.sizes = [sum([np.prod(x) for x in self.shapes[:i]]) for i in xrange(len(self.shapes)+1)]
  25.             self.components = opt_params['lSigma'].shape[2] if components is None else components
  26.             self.colours = [np.random.rand(3,1) for c in xrange(self.components)]
  27.             self.callback_counter = [0]
  28.             if batch_size is not None:
  29.                 if parallel:
  30.                     self.pool = multiprocessing.Pool(int(self.N / self.batch_size))
  31.                 else:
  32.                     self.params = np.concatenate([v.flatten() for v in opt_param_values])
  33.                     self.param_updates = np.zeros_like(self.params)
  34.                     self.moving_mean_squared = np.zeros_like(self.params)
  35.                     self.learning_rates = 1e-2*np.ones_like(self.params)
  36.      
  37.      
  38.         def unpack(self, x):
  39.             x_param_values = [x[self.sizes[i-1]:self.sizes[i]].reshape(self.shapes[i-1]) for i in xrange(1,len(self.shapes)+1)]
  40.             params = {n:v for (n,v) in zip(self.opt_param_names, x_param_values)}
  41.             if 'ltau' in params:
  42.                 params['ltau'] = params['ltau'].squeeze()
  43.             return params
  44.      
  45.         def func(self, x):
  46.             params = extend(self.fixed_params, self.unpack(x))
  47.             if self.batch_size is not None:
  48.                 X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  49.                 if self.parallel:
  50.                     arguments = [(X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  51.                     LL = sum(self.pool.map_async(eval_f_LL, arguments).get(9999999))
  52.                     KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  53.                 else:
  54.                     split = np.random.randint(splits)
  55.                     LL = self.N / self.batch_size * self.vssgp.f['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  56.                     print(LL)
  57.                     KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  58.             else:
  59.                 params = extend(self.inputs, params)
  60.                 LL, KL = self.vssgp.f['LL'](**params), self.vssgp.f['KL'](**params)
  61.             return -(LL - KL)
  62.  
  63.         def fprime(self, x):
  64.             grads, params = [], extend(self.fixed_params, self.unpack(x))
  65.             for n in self.opt_param_names:
  66.                 if self.batch_size is not None:
  67.                     X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
  68.                     if self.parallel:
  69.                         arguments = [(n, X[i::splits], Y[i::splits], params) for i in xrange(splits)]
  70.                         dLL = sum(self.pool.map_async(eval_g_LL, arguments).get(9999999))
  71.                         dKL = self.vssgp.g[n]['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  72.                     else:
  73.                         split = np.random.randint(splits)
  74.                         dLL = self.N / self.batch_size * self.vssgp.g[n]['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
  75.                         dKL = self.vssgp.g[n]['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
  76.                 else:
  77.                     params = extend(self.inputs, params)
  78.                     dLL, dKL = self.vssgp.g[n]['LL'](**params), self.vssgp.g[n]['KL'](**params)
  79.                 grads += [-(dLL - dKL)]
  80.             return np.concatenate([grad.flatten() for grad in grads])
  81.      
  82.         def callback(self, x):
  83.             if self.callback_counter[0]%self.print_interval == 0:
  84.                 opt_params = self.unpack(x)
  85.                 params = extend(self.inputs, self.fixed_params, opt_params)
  86.                 LL = self.vssgp.f['LL'](**params)
  87.                 KL = self.vssgp.f['KL'](**params)
  88.                 print(LL - KL)
  89.             self.callback_counter[0] += 1


I can get rid of the global variables.

This is where I got lost.
How do I protect WHAT from being executed?
Excuse my possibly poor comprehension.

Thanks and regards
