# Multi-Layer Perceptron
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Synthesize the XOR-style training set: two Gaussian clusters per class
# (std 0.2 around each center), 25 points per cluster.
# Red clusters are centered at (0, 0) and (1, 1).
red_points = np.concatenate((
    0.2*np.random.randn(25, 2) + np.tile([0.0, 0.0], (25, 1)),
    0.2*np.random.randn(25, 2) + np.tile([1.0, 1.0], (25, 1)),
))
# Blue clusters are centered at (0, 1) and (1, 0).
blue_points = np.concatenate((
    0.2*np.random.randn(25, 2) + np.tile([0.0, 1.0], (25, 1)),
    0.2*np.random.randn(25, 2) + np.tile([1.0, 0.0], (25, 1)),
))
# Graph inputs (TF1 placeholders, fed at run time via feed_dict):
# X holds the 2-D training points, c holds their one-hot class labels.
X = tf.placeholder(tf.float64)
c = tf.placeholder(tf.float64)
# Hidden layer: 2 -> 2 affine transform followed by an elementwise sigmoid.
# Weights/biases start from a standard normal draw (float64 to match X).
W_hidden = tf.Variable(np.random.randn(2, 2))
b_hidden = tf.Variable(np.random.randn(2))
p_hidden = tf.sigmoid(tf.matmul(X, W_hidden) + b_hidden)

# Output layer: 2 -> 2 affine transform followed by a softmax, yielding
# per-sample class probabilities over the two classes.
W_output = tf.Variable(np.random.randn(2, 2))
b_output = tf.Variable(np.random.randn(2))
p_output = tf.nn.softmax(tf.matmul(p_hidden, W_output) + b_output)
# Cross-entropy loss summed over all samples and classes:
#   J = -sum_i sum_k  c[i, k] * log(p_output[i, k])
# The original code nested two reduce_sums (axis=1, then over the
# remaining axis); a single full reduction computes the same scalar.
# NOTE(review): log(p_output) is numerically unstable if a softmax
# output underflows to 0 — tf.nn.softmax_cross_entropy_with_logits on
# the pre-softmax logits would be the robust formulation; left as-is
# to avoid restructuring the layer definitions above.
J = tf.negative(tf.reduce_sum(tf.multiply(c, tf.log(p_output))))
# One plain gradient-descent training step with a fixed learning rate.
minimization_op = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(J)
# Assemble the feed for the placeholders: all points stacked blue-first,
# with matching one-hot labels ([1, 0] = blue, [0, 1] = red).
labels = [[1, 0]] * len(blue_points) + [[0, 1]] * len(red_points)
feed_dict = {
    X: np.concatenate((blue_points, red_points)),
    c: labels,
}
# Open a TF1 session and run the op that initializes every tf.Variable
# (the randomly-drawn weights and biases above) before training.
session = tf.Session()
init_op = tf.global_variables_initializer()
session.run(init_op)
# NOTE(review): the remainder of the script (presumably the training loop
# running minimization_op with feed_dict) appears truncated/garbled here.