Pricing a stock using Monte Carlo simulation and TensorFlow deep learning

with matplotlib, NumPy, pandas, SciPy, SymPy and other mathematical libraries.
Joey20
User
Posts: 9
Registered: Tuesday 22 January 2019, 23:38

Hi everyone,

for a university project I am working on pricing an American put option.

Code: Select all

import numpy as np
import tensorflow as tf


def get_continuation_function():
    # One-unit linear regression model trained with Adam (TF 1.x graph mode)
    X = tf.placeholder(tf.float32, (None, 1), name="X")
    y = tf.placeholder(tf.float32, (None, 1), name="y")
    w = tf.Variable(tf.random_uniform((1, 1)) * 0.1, name="w")
    b = tf.Variable(initial_value=tf.ones(1), name="b")
    y_hat = tf.add(tf.matmul(X, w), b)             # linear estimate of the continuation value
    error = tf.reduce_mean(tf.pow(y - y_hat, 2))   # mean squared error
    train = tf.train.AdamOptimizer(0.1).minimize(error)
    return X, y, train, w, b, y_hat
    


def pricing_function(number_call_dates):
    # Builds the Monte Carlo pricing graph for the American put
    S = tf.placeholder(tf.float32, name="S")          # spot price
    dts = tf.placeholder(tf.float32, name="dts")      # time increments between exercise dates
    K = tf.placeholder(tf.float32, name="K")          # strike
    r = tf.placeholder(tf.float32, name="r")          # risk-free rate
    sigma = tf.placeholder(tf.float32, name="sigma")  # implied volatility
    dW = tf.placeholder(tf.float32, name="dW")        # standard normal increments, one column per exercise date

    # Geometric Brownian motion paths at the exercise dates
    S_t = S * tf.cumprod(tf.exp((r - sigma**2 / 2) * dts + sigma * tf.sqrt(dts) * dW), axis=1)

    # Discounted put payoff at each exercise date
    E_t = tf.exp(-r * tf.cumsum(dts)) * tf.maximum(K - S_t, 0)

    
    continuationValues = []
    training_functions = []

    previous_exercises = 0
    npv = 0
    for i in range(number_call_dates - 1):
        (input_x, input_y, train, w, b, y_hat) = get_continuation_function()
        training_functions.append((input_x, input_y, train, w, b, y_hat))
        X = tf.keras.activations.relu(S_t[:, i])
        contValue = tf.add(tf.matmul(X, w), b)        # estimated continuation value
        continuationValues.append(contValue)
        inMoney = tf.cast(tf.greater(E_t[:, i], 0.), tf.float32)
        # exercise only if in the money, the payoff beats the continuation value,
        # and the option has not already been exercised at an earlier date
        exercise = tf.cast(tf.greater(E_t[:, i], contValue[:, 0]), tf.float32) * inMoney * (1 - previous_exercises)
        previous_exercises += exercise
        npv += exercise * E_t[:, i]

    # Last exercise date: exercise whenever in the money and not exercised before
    inMoney = tf.cast(tf.greater(E_t[:, -1], 0.), tf.float32)
    exercise = inMoney * (1 - previous_exercises)
    npv += exercise * E_t[:, -1]
    npv = tf.reduce_mean(npv)
    # greeks = tf.gradients(npv, [S, r, sigma])
    return [S, dts, K, r, sigma, dW, S_t, E_t, npv, training_functions]
    
    
def american_tf(S_0, strike, M, impliedvol, riskfree_r, random_train, random_pricing):
    n_exercise = len(M)
    with tf.Session() as sess:

        S, dts, K, r, sigma, dW, S_t, E_t, npv, training_functions = pricing_function(n_exercise)
        sess.run(tf.global_variables_initializer())
        # Simulate the training paths and their discounted exercise values
        paths, exercise_values = sess.run([S_t, E_t], {
            S: S_0,
            dts: M,
            K: strike,
            r: riskfree_r,
            sigma: impliedvol,
            dW: random_train
        })

        # Walk backwards over the exercise dates and fit the regression for each date
        for i in range(n_exercise - 1)[::-1]:
            (input_x, input_y, train, w, b, y_hat) = training_functions[i]
            y = exercise_values[:, i + 1:i + 2]
            X = paths[:, i]

            for epochs in range(100):
                # train only on the in-the-money paths
                _ = sess.run(train, {input_x: X[exercise_values[:, i] > 0].reshape(-1, 1),
                                     input_y: y[exercise_values[:, i] > 0].reshape(-1, 1)})
                cont_value = sess.run(y_hat, {input_x: X.reshape(-1, 1), input_y: y.reshape(-1, 1)})

                exercise_values[:, i + 1:i + 2] = np.maximum(exercise_values[:, i + 1:i + 2], cont_value)

        # price on the independent pricing paths
        npv = sess.run(npv, {S: S_0, K: strike, dts: M, r: riskfree_r, sigma: impliedvol, dW: random_pricing})

        return npv
        
        
        
N_samples_learn = 1000     # paths used to train the continuation-value regressions
N_samples_pricing = 1000   # independent paths used for pricing
calldates = 12
N = np.random.randn(N_samples_learn, calldates)
N_pricing = np.random.randn(N_samples_pricing, calldates)


american_tf(100., 90., [1.] * calldates, 0.25, 0.05, N, N_pricing)

I wrote two helper functions for this: get_continuation_function, which builds the TensorFlow operators for the continuation-value regression, and pricing_function, which builds the pricing graph.
My npv operator is the sum over the optimal exercises.
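Written out in my own notation: at most one exercise flag is set per path, so the operator computes

npv = mean over all paths of sum_i exercise[:, i] * E_t[:, i],  with E_t[:, i] = exp(-r * cumsum(dts)[i]) * max(K - S_t[:, i], 0).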

The pricing itself is done by the function american_tf.
I run the graph once to generate the paths and the exercise values for the training paths. Then I loop backwards through the training_functions and learn the continuation value and the exercise decision at each exercise date, as in the sketch below.
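For reference, this is the backward induction I have in mind, written as a plain NumPy sketch with an ordinary least-squares fit in place of the trained TF layer. It is only meant to illustrate the idea, not my actual project code:

Code: Select all

import numpy as np

def lsm_put_sketch(S_0, K, r, sigma, dts, dW):
    # Longstaff-Schwartz style estimate of an American put price (illustration only)
    dts = np.asarray(dts, dtype=float)
    t = np.cumsum(dts)                                  # time of each exercise date
    # geometric Brownian motion paths at the exercise dates
    S_t = S_0 * np.cumprod(np.exp((r - sigma**2 / 2) * dts + sigma * np.sqrt(dts) * dW), axis=1)
    payoff = np.maximum(K - S_t, 0.0)                   # undiscounted put payoff per date
    cashflow = payoff[:, -1].copy()                     # start with exercise at the last date
    tau = np.full(len(S_t), len(dts) - 1)               # index of the chosen exercise date per path
    for i in range(len(dts) - 2, -1, -1):               # walk backwards over the earlier dates
        itm = payoff[:, i] > 0
        if not itm.any():
            continue
        disc = np.exp(-r * (t[tau[itm]] - t[i]))        # discount future cashflow back to t_i
        A = np.vander(S_t[itm, i], 3)                   # quadratic regression basis in the stock price
        coeff, *_ = np.linalg.lstsq(A, cashflow[itm] * disc, rcond=None)
        cont = A @ coeff                                # estimated continuation value
        ex = payoff[itm, i] > cont                      # exercise if the payoff beats continuation
        idx = np.where(itm)[0][ex]
        cashflow[idx] = payoff[idx, i]
        tau[idx] = i
    return np.mean(cashflow * np.exp(-r * t[tau]))      # discount each path's cashflow to today

print(lsm_put_sketch(100., 90., 0.05, 0.25, [1.] * 12, np.random.randn(1000, 12)))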

M contains the time steps between the exercise dates (here [1.]*calldates, i.e. one year apart), and cumsum(M) gives the times used for discounting.



So far so good, but unfortunately I cannot make sense of the error message:

InvalidArgumentError: In[0] is not a matrix
[[Node: MatMul_611 = MatMul[T=DT_FLOAT, transpose_a=false, transpose_b=false, _device="/job:localhost/replica:0/task:0/device:CPU:0"](Relu_241, w_65/read)]]

During handling of the above exception, another exception occurred:

InvalidArgumentError Traceback (most recent call last)
<ipython-input-636-2c998f9d6460> in <module>()
----> 1 american_tf(100.,90.,[1.]*calldates,0.25,0.05,N,N_pricing)

<ipython-input-634-01d7fd98f307> in american_tf(S_0, strike, M, impliedvol, riskfree_r, random_train, random_pricing)
26 exercise_values[:,i+1:i+2] = np.maximum(exercise_values[:,i+1:i+2],cont_value)
27
---> 28 npv = sess.run(npv, {S:S_0,K:strike,dts:M,r:riskfree_r,sigma:impliedvol,dW:N_pricing})
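
To narrow it down I put together this tiny standalone snippet (made up for testing, not part of my project code, and using tf.nn.relu instead of tf.keras.activations.relu). It reproduces the same message as soon as the first input of the MatMul is only 1-D, since tf.matmul expects rank-2 tensors, so I suspect the S_t[:, i] slice, but I am not sure how to change my graph:

Code: Select all

# made-up minimal example, not from my project: tf.matmul needs rank-2 inputs
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, name="x")                # shape left open, like S above
w = tf.Variable(tf.random_uniform((1, 1)), name="w")
y = tf.matmul(tf.nn.relu(x), w)                         # same Relu -> MatMul pattern as in pricing_function

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(y, {x: np.ones((5, 1), np.float32)}))  # rank 2: works
    print(sess.run(y, {x: np.ones(5, np.float32)}))       # rank 1: InvalidArgumentError "In[0] is not a matrix"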