Fehlermeldung

Wenn du dir nicht sicher bist, in welchem der anderen Foren du die Frage stellen sollst, dann bist du hier im Forum für allgemeine Fragen sicher richtig.
Romaxx
User
Beiträge: 62
Registriert: Donnerstag 26. Januar 2017, 18:53

Hallo zusammen,

gibt es niemanden, der sich den Fehler vor 2 Posts mal näher anschauen kann?
Wieso kann nicht 'gepickled' werden?

Mein naives Verständnis von diesem Vorgang ist, dass python hier versucht die Berechnungen aufzuteilen und das das aus irgendeinem Grund nicht funktioniert. Wie schon gesagt sollte diese Demo aber auf einer GPU funktionieren. Ich habe auf meiner weiteren Recherche diesen Thread gefunden:
https://bytes.com/topic/python/answers/ ... ncemethods . Steven Bethard liefert glaube ich dazu eine Lösung (Kann mir das jemand bestätigen?). Nur ist mir nicht ganz klar, wie ich das übertragen kann.


Vielen Dank und Grüße.

Romaxx
Romaxx
User
Beiträge: 62
Registriert: Donnerstag 26. Januar 2017, 18:53

Ich habe den Fehler nun mit pathos multiprocessing wegbekommen. Siehe code unten.

[codebox=python file=Unbenannt.txt]import numpy as np
from vssgp_model import VSSGP
# import multiprocessing
import pathos.multiprocessing as mp

class VSSGP_opt():
def __init__(self, N, Q, D, K, inputs, opt_params, fixed_params, use_exact_A = False, test_set = {},
parallel = False, batch_size = None, components = None, print_interval = None):
self.vssgp, self.N, self.Q, self.K, self.fixed_params = VSSGP(use_exact_A), N, Q, K, fixed_params
self.use_exact_A, self.parallel, self.batch_size = use_exact_A, parallel, batch_size
self.inputs, self.test_set = inputs, test_set
self.print_interval = 10 if print_interval is None else print_interval
self.opt_param_names = [n for n,_ in opt_params.iteritems()]
opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
self.shapes = [v.shape for v in opt_param_values]
self.sizes = [sum([np.prod(x) for x in self.shapes[:i]]) for i in xrange(len(self.shapes)+1)]
self.components = opt_params['lSigma'].shape[2] if components is None else components
self.colours = [np.random.rand(3,1) for c in xrange(self.components)]
self.callback_counter = [0]
if batch_size is not None:
if parallel:
self.pool = mp.ProcessingPool(int(self.N / self.batch_size))
else:
self.params = np.concatenate([v.flatten() for v in opt_param_values])
self.param_updates = np.zeros_like(self.params)
self.moving_mean_squared = np.zeros_like(self.params)
self.learning_rates = 1e-2*np.ones_like(self.params)

def extend(self, x, y, z = {}):

return dict(x.items() + y.items() + z.items())

def eval_f_LL(self, arguments):
out_f = self.vssgp.f['LL'](**arguments)
return (out_f)

def eval_g_LL(self, arguments):
out_g = self.vssgp.g['LL'](**arguments)
return (out_g)

def unpack(self, x):
x_param_values = [x[self.sizes[i-1]:self.sizes].reshape(self.shapes[i-1]) for i in xrange(1,len(self.shapes)+1)]
params = {n:v for (n,v) in zip(self.opt_param_names, x_param_values)}
if 'ltau' in params:
params['ltau'] = params['ltau'].squeeze()
return params

def func(self, x):
params = self.extend(self.fixed_params, self.unpack(x))
if self.batch_size is not None:
X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
if self.parallel:
arguments = [(X[i::splits], Y[i::splits], params) for i in xrange(splits)]
LL = sum(self.pool.map(self.eval_f_LL, arguments).get(9999999))
KL = self.vssgp.f['KL'](**self.extend({'X': [[0]], 'Y': [[0]]}, params))
else:
split = np.random.randint(splits)
LL = self.N / self.batch_size * self.vssgp.f['LL'](**self.extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
print LL
KL = self.vssgp.f['KL'](**self.extend({'X': [[0]], 'Y': [[0]]}, params))
else:
params = self.extend(self.inputs, params)
LL, KL = self.vssgp.f['LL'](**params), self.vssgp.f['KL'](**params)
return -(LL - KL)

def fprime(self, x):
grads, params = [], self.extend(self.fixed_params, self.unpack(x))
for n in self.opt_param_names:
if self.batch_size is not None:
X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
if self.parallel:
arguments = [(n, X[i::splits], Y[i::splits], params) for i in xrange(splits)]
dLL = sum(self.pool.map(self.eval_g_LL, arguments).get(9999999))
dKL = self.vssgp.g[n]['KL'](**self.extend({'X': [[0]], 'Y': [[0]]}, params))
else:
split = np.random.randint(splits)
dLL = self.N / self.batch_size * self.vssgp.g[n]['LL'](**self.extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
dKL = self.vssgp.g[n]['KL'](**self.extend({'X': [[0]], 'Y': [[0]]}, params))
else:
params = self.extend(self.inputs, params)
dLL, dKL = self.vssgp.g[n]['LL'](**params), self.vssgp.g[n]['KL'](**params)
grads += [-(dLL - dKL)]
return np.concatenate([grad.flatten() for grad in grads])

def callback(self, x):
if self.callback_counter[0]%self.print_interval == 0:
opt_params = self.unpack(x)
params = self.extend(self.inputs, self.fixed_params, opt_params)
LL = self.vssgp.f['LL'](**params)
KL = self.vssgp.f['KL'](**params)
print LL - KL
self.callback_counter[0] += 1[/code]

Nun erhalte ich aber die Fehler:

Code: Alles auswählen

Traceback (most recent call last):

  File "<ipython-input-2-f919e99b6eea>", line 1, in <module>
    if __name__== '__main__' : STARTME()

  File "C:/Users/flo9fe/Desktop/DEEPvSSGP/vssgp_example.py", line 50, in STARTME
    options={'ftol': 0, 'disp': False, 'maxiter': 500}, tol=0, callback=vssgp_opt.callback)

  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\_minimize.py", line 450, in minimize
    callback=callback, **options)

  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\lbfgsb.py", line 328, in _minimize_lbfgsb
    f, g = func_and_grad(x)

  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\lbfgsb.py", line 278, in func_and_grad
    f = fun(x, *args)

  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\optimize.py", line 292, in function_wrapper
    return function(*(wrapper_args + args))

  File "vssgp_opt.py", line 54, in func
    LL = sum(self.pool.map(self.eval_f_LL, arguments).get(9999999))

  File "C:\Program Files\Anaconda2\lib\site-packages\pathos\multiprocessing.py", line 136, in map
    return _pool.map(star(f), zip(*args)) # chunksize

  File "C:\Program Files\Anaconda2\lib\site-packages\multiprocess\pool.py", line 251, in map
    return self.map_async(func, iterable, chunksize).get()

  File "C:\Program Files\Anaconda2\lib\site-packages\multiprocess\pool.py", line 567, in get
    raise self._value

RuntimeError: maximum recursion depth exceeded in __subclasscheck__

Jemand der mit hier weiterhelfen kann?

Danke und Grüße
Romaxx
User
Beiträge: 62
Registriert: Donnerstag 26. Januar 2017, 18:53

Ok, auch dieser Fehler scheint behoben, habe einfach

[codebox=python file=Unbenannt.txt]import pathos.multiprocessing as mp[/code]

durch

[codebox=python file=Unbenannt.txt]from pathos.pools import ThreadPool[/code]

und

[codebox=python file=Unbenannt.txt]mp.ProcessingPool[/code]

durch

[codebox=python file=Unbenannt.txt]ThreadPool[/code].

Nächster Fehler ist dies hier

Code: Alles auswählen

Traceback (most recent call last):

  File "<ipython-input-2-f919e99b6eea>", line 1, in <module>
    if __name__== '__main__' : STARTME()

  File "C:/Users/flo9fe/Desktop/DEEPvSSGP/vssgp_example.py", line 50, in STARTME
    options={'ftol': 0, 'disp': False, 'maxiter': 500}, tol=0, callback=vssgp_opt.callback)

  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\_minimize.py", line 450, in minimize
    callback=callback, **options)

  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\lbfgsb.py", line 328, in _minimize_lbfgsb
    f, g = func_and_grad(x)

  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\lbfgsb.py", line 278, in func_and_grad
    f = fun(x, *args)

  File "C:\Program Files\Anaconda2\lib\site-packages\scipy\optimize\optimize.py", line 292, in function_wrapper
    return function(*(wrapper_args + args))

  File "vssgp_opt.py", line 54, in func
    LL = sum(self.pool.map(self.eval_f_LL, arguments).get(9999999))

  File "C:\Program Files\Anaconda2\lib\site-packages\pathos\threading.py", line 133, in map
    return _pool.map(star(f), zip(*args)) # chunksize

  File "C:\Program Files\Anaconda2\lib\site-packages\multiprocess\pool.py", line 251, in map
    return self.map_async(func, iterable, chunksize).get()

  File "C:\Program Files\Anaconda2\lib\site-packages\multiprocess\pool.py", line 567, in get
    raise self._value

TypeError: Function object argument after ** must be a mapping, not tuple
Jemand der mir sagen kann, was zu verbessern ist? Danke
Antworten