Re: Error message
Posted: Wednesday, 15 February 2017, 15:14
A few posts back you wrote 'Basically everything after the imports.'
In the post before that I had quoted VSSGP_opt to you, so I assume that is the module you mean, specifically everything from line 11/12 onwards (inclusive). Here it is once more:
[codebox=python file=Unbenannt.txt]import numpy as np
from vssgp_model import VSSGP
import multiprocessing

def extend(x, y, z={}):
    # Merge up to three dicts into a new one. The Python-2-only
    # dict(x.items() + y.items() + z.items()) fails under Python 3,
    # where dict views cannot be concatenated with +.
    merged = dict(x)
    merged.update(y)
    merged.update(z)
    return merged

def eval_f_LL(X, Y, params):
    # Log likelihood of one mini-batch (module-level so it can be
    # pickled for multiprocessing).
    return VSSGP.f['LL'](**extend({'X': X, 'Y': Y}, params))

def eval_g_LL(name, X, Y, params):
    # Gradient of the log likelihood w.r.t. the parameter `name`; the
    # earlier version called VSSGP.f['LL'] here and thus returned the
    # function value instead of the gradient.
    return VSSGP.g[name]['LL'](**extend({'X': X, 'Y': Y}, params))

class VSSGP_opt:
    def __init__(self, N, Q, D, K, inputs, opt_params, fixed_params, use_exact_A=False,
                 test_set={}, parallel=False, batch_size=None, components=None,
                 print_interval=None):
        self.vssgp, self.N, self.Q, self.K, self.fixed_params = VSSGP(use_exact_A), N, Q, K, fixed_params
        self.use_exact_A, self.parallel, self.batch_size = use_exact_A, parallel, batch_size
        self.inputs, self.test_set = inputs, test_set
        self.print_interval = 10 if print_interval is None else print_interval
        self.opt_param_names = list(opt_params.keys())  # dict.iteritems() no longer exists in Python 3
        opt_param_values = [np.atleast_2d(opt_params[n]) for n in self.opt_param_names]
        self.shapes = [v.shape for v in opt_param_values]
        # Offsets of each flattened parameter block; xrange() is range() in Python 3.
        self.sizes = [sum(np.prod(x) for x in self.shapes[:i]) for i in range(len(self.shapes) + 1)]
        self.components = opt_params['lSigma'].shape[2] if components is None else components
        self.colours = [np.random.rand(3, 1) for c in range(self.components)]
        self.callback_counter = [0]
        if batch_size is not None:
            if parallel:
                self.pool = multiprocessing.Pool(int(self.N / self.batch_size))
            else:
                self.params = np.concatenate([v.flatten() for v in opt_param_values])
                self.param_updates = np.zeros_like(self.params)
                self.moving_mean_squared = np.zeros_like(self.params)
                self.learning_rates = 1e-2 * np.ones_like(self.params)

    def unpack(self, x):
        # Cut the flat vector x back into the original parameter shapes; the
        # earlier version was missing the [i] index on self.sizes.
        x_param_values = [x[self.sizes[i - 1]:self.sizes[i]].reshape(self.shapes[i - 1])
                          for i in range(1, len(self.shapes) + 1)]
        params = {n: v for (n, v) in zip(self.opt_param_names, x_param_values)}
        if 'ltau' in params:
            params['ltau'] = params['ltau'].squeeze()
        return params

    def func(self, x):
        params = extend(self.fixed_params, self.unpack(x))
        if self.batch_size is not None:
            X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
            if self.parallel:
                arguments = [(X[i::splits], Y[i::splits], params) for i in range(splits)]
                # starmap_async unpacks each argument tuple; plain map_async would
                # pass the whole tuple as a single positional argument.
                LL = sum(self.pool.starmap_async(eval_f_LL, arguments).get(9999999))
                KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
            else:
                split = np.random.randint(splits)
                LL = self.N / self.batch_size * self.vssgp.f['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
                print(LL)
                KL = self.vssgp.f['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
        else:
            params = extend(self.inputs, params)
            LL, KL = self.vssgp.f['LL'](**params), self.vssgp.f['KL'](**params)
        return -(LL - KL)

    def fprime(self, x):
        grads, params = [], extend(self.fixed_params, self.unpack(x))
        for n in self.opt_param_names:
            if self.batch_size is not None:
                X, Y, splits = self.inputs['X'], self.inputs['Y'], int(self.N / self.batch_size)
                if self.parallel:
                    arguments = [(n, X[i::splits], Y[i::splits], params) for i in range(splits)]
                    dLL = sum(self.pool.starmap_async(eval_g_LL, arguments).get(9999999))
                    dKL = self.vssgp.g[n]['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
                else:
                    split = np.random.randint(splits)
                    dLL = self.N / self.batch_size * self.vssgp.g[n]['LL'](**extend({'X': X[split::splits], 'Y': Y[split::splits]}, params))
                    dKL = self.vssgp.g[n]['KL'](**extend({'X': [[0]], 'Y': [[0]]}, params))
            else:
                params = extend(self.inputs, params)
                dLL, dKL = self.vssgp.g[n]['LL'](**params), self.vssgp.g[n]['KL'](**params)
            grads += [-(dLL - dKL)]
        return np.concatenate([grad.flatten() for grad in grads])

    def callback(self, x):
        # Print the current bound LL - KL every print_interval iterations.
        if self.callback_counter[0] % self.print_interval == 0:
            opt_params = self.unpack(x)
            params = extend(self.inputs, self.fixed_params, opt_params)
            LL = self.vssgp.f['LL'](**params)
            KL = self.vssgp.f['KL'](**params)
            print(LL - KL)
        self.callback_counter[0] += 1[/code]
And in the post after that you write 'Again: that is the wrong module!', after I offered a supposedly trivial attempt at a solution. Sorry, but I simply don't understand it. You could help me a great deal by being concrete and showing me in the code what you mean by the module, because apparently it is already there, since you write 'You only run one of them as a program'.
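So that I am at least trying to be concrete myself, here is my current guess at what you mean: every .py file is a module (vssgp_model.py, the VSSGP_opt file above, and so on), but only the one file I actually start with python runs as the program. A minimal sketch of that guess; the file name run_vssgp.py and all the values in it are invented by me, not taken from this thread:
[codebox=python file=run_vssgp.py]# run_vssgp.py -- my hypothetical main program; names and values are invented
import numpy as np
from vssgp_opt import VSSGP_opt  # assumes the file quoted above is saved as vssgp_opt.py

if __name__ == '__main__':
    # Only the file started as `python run_vssgp.py` executes this block;
    # files that are merely imported (vssgp_opt, vssgp_model) do not.
    N, Q, D, K = 100, 1, 1, 10  # invented problem sizes
    inputs = {'X': np.random.rand(N, Q), 'Y': np.random.rand(N, D)}
    # components=1 is passed only so this sketch does not depend on an
    # (here invented) opt_params['lSigma'] entry.
    opt = VSSGP_opt(N, Q, D, K, inputs, opt_params={}, fixed_params={}, components=1)
    # ... the actual optimisation would start here ...[/code]
Is that roughly the distinction you mean between the modules and the one program?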
Don't get me wrong, I really do want to learn something...