Tensorflow und Hardware.
heruntergeladen.
Es läuft ganz gut, bis die Ausgabe in Datensalat übergeht.
Der Inhalt ist hier nur von sekundärer Bedeutung.
Ich weiß nicht, was noch an Ergebnissen erscheinen könnte.
Ich werde ihn trotzdem untersuchen. Vielleicht bleibt noch etwas hängen.
Code: Alles auswählen
# Imports for the Keras Sequential-model examples below.
# Fixes from the original paste: the first line was truncated to
# "mport tensorflow as tf" (SyntaxError), and the same three imports
# appeared twice; deduplicated without removing any imported name.
import tensorflow as tf

import keras
from keras import layers
# Build a Sequential model by handing the constructor a list of
# three named Dense layers.
dense_stack = [
    layers.Dense(2, activation="relu", name="layer1"),
    layers.Dense(3, activation="relu", name="layer2"),
    layers.Dense(4, name="layer3"),
]
model = keras.Sequential(dense_stack)

# One forward pass on a dummy (3, 3) batch builds the model's weights.
x = tf.ones((3, 3))
y = model(x)
# Functional equivalent of the Sequential model above: create the
# same three Dense layers and chain the calls explicitly.
layer1 = layers.Dense(2, activation="relu", name="layer1")
layer2 = layers.Dense(3, activation="relu", name="layer2")
layer3 = layers.Dense(4, name="layer3")

# Feed a dummy batch through the chain layer1 -> layer2 -> layer3.
x = tf.ones((3, 3))
hidden_a = layer1(x)
hidden_b = layer2(hidden_a)
y = layer3(hidden_b)
# Without explicit names, Keras auto-names the layers (dense,
# dense_1, ...). Built here via add() instead of a list literal.
model = keras.Sequential()
for layer_cfg in (
    {"units": 2, "activation": "relu"},
    {"units": 3, "activation": "relu"},
    {"units": 4},
):
    model.add(layers.Dense(**layer_cfg))
# Bare expression: inspects the layer list (only useful in a REPL).
model.layers
# A Sequential model behaves like a list of layers: add() appends
# and pop() removes the last layer.
model = keras.Sequential()
for units in (2, 3):
    model.add(layers.Dense(units, activation="relu"))
model.add(layers.Dense(4))
model.pop()  # drop the Dense(4) layer again
print(len(model.layers))  # 2
# Both the model itself and its layers can carry explicit names.
model = keras.Sequential(name="my_sequential")
for idx, units in enumerate((2, 3, 4), start=1):
    # The first two layers use ReLU; the final layer has no activation.
    act = "relu" if idx < 3 else None
    model.add(layers.Dense(units, activation=act, name=f"layer{idx}"))
# A Dense layer has no weights until it is first called: the kernel
# shape depends on the input's last dimension, which is unknown here.
layer = layers.Dense(3)
layer.weights  # Empty
# [] <- pasted REPL output; was left as a stray bare-list statement, now a comment

# Call layer on a test input
x = tf.ones((1, 4))
y = layer(x)
layer.weights  # Now it has weights, of shape (4, 3) and (3,)
#[<tf.Variable 'dense_6/kernel:0' shape=(4, 3) dtype=float32, numpy=
# array([[ 0.1752373 , 0.47623062, 0.24374962],
# [-0.0298934 , 0.50255656, 0.78478384],
# [-0.58323103, -0.56861055, -0.7190975 ],
# [-0.3191281 , -0.23635858, -0.8841506 ]], dtype=float32)>,
# <tf.Variable 'dense_6/bias:0' shape=(3,) dtype=float32, numpy=array([0., 0., 0.], dtype=float32)>]
# The same laziness applies to a whole Sequential model: until it is
# called (or given an Input), it has no weights and no summary.
unbuilt_layers = [
    layers.Dense(2, activation="relu"),
    layers.Dense(3, activation="relu"),
    layers.Dense(4),
]
model = keras.Sequential(unbuilt_layers)  # No weights at this stage!

# Neither of these works yet:
#   model.weights
#   model.summary()

# A single forward pass on a dummy batch builds every layer's weights.
x = tf.ones((1, 4))
y = model(x)
print("Number of weights after calling the model:", len(model.weights))  # 6
model.summary()
# Pasted console output of model.summary(). The first line was left
# uncommented as `Model: "sequential_3"` (a stray annotation
# statement); now fully commented out.
# Model: "sequential_3"
#_________________________________________________________________
# Layer (type) Output Shape Param #
#=================================================================
# dense_7 (Dense) (1, 2) 10
# dense_8 (Dense) (1, 3) 9
# dense_9 (Dense) (1, 4) 16
#Total params: 35 (140.00 Byte)
#Trainable params: 35 (140.00 Byte)
#Non-trainable params: 0 (0.00 Byte)
# Declaring the input shape up front via keras.Input builds the model
# immediately, so summary() works before any data has been seen.
model = keras.Sequential()
model.add(keras.Input(shape=(4,)))
model.add(layers.Dense(2, activation="relu"))
model.summary()
# Model: "sequential_4"  <- pasted output; was a stray annotation statement
# Layer (type) Output Shape Param #
#=================================================================
# dense_10 (Dense) (None, 2) 10
#Total params: 10 (40.00 Byte)
#Trainable params: 10 (40.00 Byte)
#Non-trainable params: 0 (0.00 Byte)
#
#model.layers
#A simple alternative is to just pass an input_shape argument to your first layer:
# NOTE(review): `input_shape=` on a layer is the legacy tf.keras API;
# newer Keras versions prefer an explicit keras.Input — confirm against
# the Keras version in use.
model = keras.Sequential()
model.add(layers.Dense(2, activation="relu", input_shape=(4,)))
model.summary()
# Model: "sequential_5"  <- pasted output; was a stray annotation statement
#Total params: 10 (40.00 Byte)
#Trainable params: 10 (40.00 Byte)
#Non-trainable params: 0 (0.00 Byte)
#_________________________________________________________________
# Incremental debugging workflow: add layers a few at a time and call
# summary() after each group to watch the spatial dims shrink.
model = keras.Sequential()
model.add(keras.Input(shape=(250, 250, 3)))  # 250x250 RGB images
model.add(layers.Conv2D(32, 5, strides=2, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(3))
# Can you guess what the current output shape is at this point? Probably not.
# Let's just print it:
model.summary()
# The answer was: (40, 40, 32), so we can keep downsampling...
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(3))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(layers.MaxPooling2D(2))
# And now?
model.summary()
# Now that we have 4x4 feature maps, time to apply global max pooling.
model.add(layers.GlobalMaxPooling2D())
# Finally, we add a classification layer.
model.add(layers.Dense(10))
# Pasted summary() outputs from the two checkpoints above. The first
# line was left uncommented as `Model: "sequential_6"` (a stray
# annotation statement); now fully commented out.
# Model: "sequential_6"
# Layer (type) Output Shape Param #
#=================================================================
# conv2d (Conv2D) (None, 123, 123, 32) 2432
# conv2d_1 (Conv2D) (None, 121, 121, 32) 9248
# max_pooling2d (MaxPooling2 (None, 40, 40, 32) 0
# D)
#Total params: 11680 (45.62 KB)
#Trainable params: 11680 (45.62 KB)
#Non-trainable params: 0 (0.00 Byte)
#_________________________________________________________________
#Model: "sequential_6"
#=================================================================
#conv2d (Conv2D) (None, 123, 123, 32) 2432
# conv2d_1 (Conv2D) (None, 121, 121, 32) 9248
# max_pooling2d (MaxPooling2 (None, 40, 40, 32) 0
# D)
# conv2d_2 (Conv2D) (None, 38, 38, 32) 9248
# conv2d_3 (Conv2D) (None, 36, 36, 32) 9248
# max_pooling2d_1 (MaxPoolin (None, 12, 12, 32) 0
# g2D)
# conv2d_4 (Conv2D) (None, 10, 10, 32) 9248
# conv2d_5 (Conv2D) (None, 8, 8, 32) 9248
#max_pooling2d_2 (MaxPoolin (None, 4, 4, 32) 0
# g2D)
#================================================================
#Total params: 48672 (190.12 KB)
#Trainable params: 48672 (190.12 KB)
#Non-trainable params: 0 (0.00 Byte)
# Feature extraction: wrap a Sequential model in a functional Model
# whose outputs are every intermediate layer's output tensor.
initial_model = keras.Sequential(
    [
        keras.Input(shape=(250, 250, 3)),
        layers.Conv2D(32, 5, strides=2, activation="relu"),
        layers.Conv2D(32, 3, activation="relu"),
        layers.Conv2D(32, 3, activation="relu"),
    ]
)
intermediate_outputs = [lyr.output for lyr in initial_model.layers]
feature_extractor = keras.Model(
    inputs=initial_model.inputs,
    outputs=intermediate_outputs,
)

# Run the extractor on a dummy image batch.
x = tf.ones((1, 250, 250, 3))
features = feature_extractor(x)
# Variant: extract only one intermediate layer's output, looked up
# by its explicit name via get_layer().
initial_model = keras.Sequential(
    [
        keras.Input(shape=(250, 250, 3)),
        layers.Conv2D(32, 5, strides=2, activation="relu"),
        layers.Conv2D(32, 3, activation="relu", name="my_intermediate_layer"),
        layers.Conv2D(32, 3, activation="relu"),
    ]
)
target_layer = initial_model.get_layer(name="my_intermediate_layer")
feature_extractor = keras.Model(
    inputs=initial_model.inputs,
    outputs=target_layer.output,
)

# Run the extractor on a dummy image batch.
x = tf.ones((1, 250, 250, 3))
features = feature_extractor(x)
#model = keras.Sequential([ keras.Input(shape=(784)),
#    layers.Dense(32, activation='relu'),
#    layers.Dense(32, activation='relu'),
#    layers.Dense(10)])
# Presumably you would want to first load pre-trained weights.
#
# Transfer-learning pattern 1: freeze every layer except the final one.
# NOTE(review): `model` here is whatever model was defined last above.
for frozen_layer in model.layers[:-1]:
    frozen_layer.trainable = False
# Recompile and train (this will only update the weights of the last layer).
# Transfer-learning pattern 2: a frozen pre-trained convolutional
# base topped by a trainable classifier.
# Load a convolutional base with pre-trained weights (downloads the
# ImageNet weights on first use).
base_model = keras.applications.Xception(
    weights='imagenet', include_top=False, pooling='avg'
)
# Freeze the whole base so its weights are not updated during training.
base_model.trainable = False
# Use a Sequential model to add a trainable classifier on top
model = keras.Sequential([
base_model,
layers.Dense(1000),