This is a simple feedforward network with one hidden layer.
import numpy as np
import tensorflow as tf
episodes = 55
batch_size = 5
hidden_units = 10
learning_rate = 1e-3
def weight_variable(shape):
    # small truncated-normal init to break symmetry between units
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    # small positive bias so ReLU units start out active
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
# generate 200 evenly spaced inputs in [0, 1) and shuffle them
x_data = np.arange(0, 1, 0.005).astype(float)
np.random.shuffle(x_data)
# target function: y = |x + 0.1|
y_data = np.abs(x_data + .1)
# reshape to column vectors: 200 samples, 1 feature each
x_data = x_data.reshape(200, 1)
y_data = y_data.reshape(200, 1)
# create placeholders to pass the data to the model
x = tf.placeholder('float', shape=[None, 1])
y_ = tf.placeholder('float', shape=[None, 1])
# hidden layer: 1 input -> hidden_units ReLU units
W1 = weight_variable([1, hidden_units])
b1 = bias_variable([hidden_units])
h1 = tf.nn.relu(tf.matmul(x, W1) + b1)
# linear output layer: hidden_units -> 1
W2 = weight_variable([hidden_units, 1])
b2 = bias_variable([1])
y = tf.matmul(h1, W2) + b2
# use reduce_mean so the loss actually is the *mean* squared error, as the name says
mean_square_error = tf.reduce_mean(tf.square(y - y_))
training = tf.train.AdamOptimizer(learning_rate).minimize(mean_square_error)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated
for _ in range(episodes):
    # slide a window of batch_size rows over the shuffled data (stride 1, overlapping batches)
    for i in range(x_data.shape[0] - batch_size + 1):
        _, error = sess.run([training, mean_square_error],
                            feed_dict={x: x_data[i:i+batch_size], y_: y_data[i:i+batch_size]})
    # print the loss once per episode rather than once per batch
    print(error)
# final loss over the whole dataset (evaluation only, no extra training step)
final_error = sess.run(mean_square_error, feed_dict={x: x_data, y_: y_data})
print(final_error)
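To sanity-check the fit, the trained graph can also be queried for predictions and compared against the target y = |x + 0.1|. A minimal sketch, reusing the session and tensors defined above (x_test and y_pred are names introduced here for illustration):

# predictions on sorted inputs, for easy comparison with the target
x_test = np.sort(x_data, axis=0)
y_pred = sess.run(y, feed_dict={x: x_test})
print(np.max(np.abs(y_pred - np.abs(x_test + 0.1))))  # worst-case absolute error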
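Note that the listing above uses the TensorFlow 1.x graph API (tf.placeholder, tf.InteractiveSession), which is only reachable through tf.compat.v1 in TensorFlow 2.x. For reference, a minimal sketch of the same one-hidden-layer network in Keras, assuming TensorFlow 2 is installed:

import numpy as np
import tensorflow as tf

# same data as above: 200 points of y = |x + 0.1| on [0, 1)
x_data = np.arange(0, 1, 0.005).reshape(200, 1).astype('float32')
y_data = np.abs(x_data + 0.1)

model = tf.keras.Sequential([
    tf.keras.Input(shape=(1,)),
    tf.keras.layers.Dense(10, activation='relu'),  # hidden layer, as above
    tf.keras.layers.Dense(1),                      # linear output
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3), loss='mse')
model.fit(x_data, y_data, batch_size=5, epochs=55, verbose=0)
print(model.evaluate(x_data, y_data, verbose=0))  # final mean squared error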