import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, Dense

# Seed NumPy so the synthetic dataset (and hence a run's metrics) is
# reproducible; the original script generated different data every run.
np.random.seed(42)

# --- Synthetic example data --------------------------------------------------
num_samples = 1000     # number of (sequence, label) pairs
sequence_length = 10   # token ids per input sequence
vocab_size = 10000     # integer token ids are drawn from [0, vocab_size)

# X: random token-id sequences; y: random binary labels.  Because the labels
# carry no signal, the model can only memorize the training split, so
# validation accuracy is expected to hover near chance (~0.5).
X = np.random.randint(vocab_size, size=(num_samples, sequence_length))
y = np.random.randint(2, size=num_samples)

# --- Model -------------------------------------------------------------------
model = Sequential()
# Map each token id to a learned 32-dimensional dense vector.
model.add(Embedding(vocab_size, 32, input_length=sequence_length))
# Plain RNN; only the final hidden state (64 units) feeds the classifier.
model.add(SimpleRNN(64))
# Single sigmoid unit -> probability for the positive class.
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# --- Training ----------------------------------------------------------------
# 80/20 train/validation split (800 train / 200 validation samples).
model.fit(X, y, epochs=10, batch_size=32, validation_split=0.2)
aW1wb3J0IG51bXB5IGFzIG5wCmZyb20gdGVuc29yZmxvdy5rZXJhcy5tb2RlbHMgaW1wb3J0IFNlcXVlbnRpYWwKZnJvbSB0ZW5zb3JmbG93LmtlcmFzLmxheWVycyBpbXBvcnQgRW1iZWRkaW5nLCBTaW1wbGVSTk4sIERlbnNlCiMgR2VuZXJhdGUgc29tZSBleGFtcGxlIGRhdGEKbnVtX3NhbXBsZXMgPSAxMDAwCnNlcXVlbmNlX2xlbmd0aCA9IDEwCnZvY2FiX3NpemUgPSAxMDAwMApYID0gbnAucmFuZG9tLnJhbmRpbnQodm9jYWJfc2l6ZSxzaXplPShudW1fc2FtcGxlcyxzZXF1ZW5jZV9sZW5ndGgpKQp5ID0gbnAucmFuZG9tLnJhbmRpbnQoMiwgc2l6ZT1udW1fc2FtcGxlcykKIyBCdWlsZCB0aGUgUk5OIG1vZGVsCm1vZGVsID0gU2VxdWVudGlhbCgpCm1vZGVsLmFkZChFbWJlZGRpbmcodm9jYWJfc2l6ZSwgMzIsIGlucHV0X2xlbmd0aD1zZXF1ZW5jZV9sZW5ndGgpKQptb2RlbC5hZGQoU2ltcGxlUk5OKDY0KSkKbW9kZWwuYWRkKERlbnNlKDEsIGFjdGl2YXRpb249J3NpZ21vaWQnKSkKbW9kZWwuY29tcGlsZShvcHRpbWl6ZXI9J2FkYW0nLCBsb3NzPSdiaW5hcnlfY3Jvc3NlbnRyb3B5JywgbWV0cmljcz1bJ2FjY3VyYWN5J10pCiMgVHJhaW4gdGhlIG1vZGVsCm1vZGVsLmZpdChYLCB5LCBlcG9jaHM9MTAsIGJhdGNoX3NpemU9MzIsIHZhbGlkYXRpb25fc3BsaXQ9MC4yKQo=
stdout
Train on 800 samples, validate on 200 samples
Epoch 1/10
32/800 [>.............................] - ETA: 12s - loss: 0.6876 - acc: 0.6250
288/800 [=========>....................] - ETA: 1s - loss: 0.6936 - acc: 0.5069
576/800 [====================>.........] - ETA: 0s - loss: 0.6925 - acc: 0.4931
800/800 [==============================] - 1s 925us/sample - loss: 0.6911 - acc: 0.5200 - val_loss: 0.6958 - val_acc: 0.4850
Epoch 2/10
32/800 [>.............................] - ETA: 0s - loss: 0.5925 - acc: 0.9062
288/800 [=========>....................] - ETA: 0s - loss: 0.5567 - acc: 0.9514
544/800 [===================>..........] - ETA: 0s - loss: 0.4807 - acc: 0.9449
800/800 [==============================] - 0s 209us/sample - loss: 0.3929 - acc: 0.9475 - val_loss: 1.1414 - val_acc: 0.4900
Epoch 3/10
32/800 [>.............................] - ETA: 0s - loss: 0.0218 - acc: 1.0000
288/800 [=========>....................] - ETA: 0s - loss: 0.0185 - acc: 0.9931
576/800 [====================>.........] - ETA: 0s - loss: 0.0125 - acc: 0.9965
800/800 [==============================] - 0s 213us/sample - loss: 0.0100 - acc: 0.9975 - val_loss: 1.8613 - val_acc: 0.5050
Epoch 4/10
32/800 [>.............................] - ETA: 0s - loss: 0.0016 - acc: 1.0000
288/800 [=========>....................] - ETA: 0s - loss: 0.0011 - acc: 1.0000
544/800 [===================>..........] - ETA: 0s - loss: 0.0012 - acc: 1.0000
800/800 [==============================] - 0s 209us/sample - loss: 0.0010 - acc: 1.0000 - val_loss: 1.9508 - val_acc: 0.5000
Epoch 5/10
32/800 [>.............................] - ETA: 0s - loss: 7.2196e-04 - acc: 1.0000
288/800 [=========>....................] - ETA: 0s - loss: 6.8007e-04 - acc: 1.0000
544/800 [===================>..........] - ETA: 0s - loss: 6.7829e-04 - acc: 1.0000
800/800 [==============================] - 0s 218us/sample - loss: 6.6577e-04 - acc: 1.0000 - val_loss: 1.9890 - val_acc: 0.5050
Epoch 6/10
32/800 [>.............................] - ETA: 0s - loss: 6.4435e-04 - acc: 1.0000
288/800 [=========>....................] - ETA: 0s - loss: 5.7625e-04 - acc: 1.0000
544/800 [===================>..........] - ETA: 0s - loss: 5.7409e-04 - acc: 1.0000
800/800 [==============================] - 0s 206us/sample - loss: 5.6187e-04 - acc: 1.0000 - val_loss: 2.0264 - val_acc: 0.4950
Epoch 7/10
32/800 [>.............................] - ETA: 0s - loss: 4.9777e-04 - acc: 1.0000
288/800 [=========>....................] - ETA: 0s - loss: 4.9924e-04 - acc: 1.0000
576/800 [====================>.........] - ETA: 0s - loss: 5.0001e-04 - acc: 1.0000
800/800 [==============================] - 0s 210us/sample - loss: 4.8474e-04 - acc: 1.0000 - val_loss: 2.0616 - val_acc: 0.4950
Epoch 8/10
32/800 [>.............................] - ETA: 0s - loss: 3.8722e-04 - acc: 1.0000
320/800 [===========>..................] - ETA: 0s - loss: 4.3908e-04 - acc: 1.0000
576/800 [====================>.........] - ETA: 0s - loss: 4.2555e-04 - acc: 1.0000
800/800 [==============================] - 0s 206us/sample - loss: 4.2353e-04 - acc: 1.0000 - val_loss: 2.0946 - val_acc: 0.4950
Epoch 9/10
32/800 [>.............................] - ETA: 0s - loss: 3.7789e-04 - acc: 1.0000
288/800 [=========>....................] - ETA: 0s - loss: 3.8296e-04 - acc: 1.0000
544/800 [===================>..........] - ETA: 0s - loss: 3.7374e-04 - acc: 1.0000
800/800 [==============================] - 0s 209us/sample - loss: 3.7394e-04 - acc: 1.0000 - val_loss: 2.1265 - val_acc: 0.4950
Epoch 10/10
32/800 [>.............................] - ETA: 0s - loss: 3.1559e-04 - acc: 1.0000
288/800 [=========>....................] - ETA: 0s - loss: 3.3319e-04 - acc: 1.0000
544/800 [===================>..........] - ETA: 0s - loss: 3.3761e-04 - acc: 1.0000
800/800 [==============================] - 0s 208us/sample - loss: 3.3285e-04 - acc: 1.0000 - val_loss: 2.1576 - val_acc: 0.4950
stderr
WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.