[[pagelist(^(머신러닝스터디/2016))]]
[머신러닝스터디/2016] 
[머신러닝스터디/2016/목차]
== Contents ==
 * The TensorFlow example code uses Softmax. Here we modify that example and implement Sigmoid and Cross Entropy ourselves (see the sketch after this list).
  * [https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2%20-%20Basic%20Classifiers/logistic_regression.py Logistic Regression example code]
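As a quick reference for what the listing below computes: the model applies an element-wise sigmoid to the logits and uses the cross entropy, summed over the 10 classes and averaged over the batch, as the cost. A minimal NumPy sketch of that cost (the `sigmoid` and `cross_entropy_cost` helper names are only illustrative, not part of the study code):
{{{# python3
import numpy as np

def sigmoid(z):
    # element-wise logistic function
    return 1.0 / (1.0 + np.exp(-z))

def cross_entropy_cost(y, h):
    # y, h: arrays of shape (batch, 10); y is one-hot, h = sigmoid(logits)
    # sum the per-class terms -(y*log(h) + (1-y)*log(1-h)), then average over the batch
    return np.mean(-np.sum(y * np.log(h) + (1.0 - y) * np.log(1.0 - h), axis=1))
}}}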
== Code ==
{{{# python3
import tensorflow as tf
import input_data
if __name__ == "__main__":
    # Logistic regression on MNIST: sigmoid activation with a
    # hand-written cross-entropy cost (instead of the example's softmax)
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    learning_rate = 0.01
    x = tf.placeholder(tf.float32, shape=(None, 28 * 28))
    y = tf.placeholder(tf.float32, shape=(None, 10))
    # x = n * (28 * 28)
    # weight = (28 * 28) * 10
    weight = tf.Variable(tf.zeros((28 * 28, 10)))
    bias = tf.Variable(tf.zeros((10,)))
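    # Note: starting from all-zero weights is fine for this single-layer model;
    # a network with hidden layers (next week's topic) would need random
    # initial weights to break symmetry.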
    # h = n * 10
    h = tf.sigmoid(tf.matmul(x, weight) + bias)
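    # Cross-entropy cost: for each example, sum -(y*log(h) + (1-y)*log(1-h))
    # over the 10 classes, then average over the batch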
    cost = tf.reduce_mean(-(tf.reduce_sum(y * tf.log(h), 1) + tf.reduce_sum((1. - y) * tf.log(1. - h), 1)))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    init = tf.initialize_all_variables()
    batch_size = 100
    display_step = 1
    with tf.Session() as sess:
        sess.run(init)
        # Training cycle
        for epoch in range(10):
            avg_cost = 0.
            total_batch = int(mnist.train.num_examples/batch_size)
            # Loop over all batches
            for i in range(total_batch):
                batch_xs, batch_ys = mnist.train.next_batch(batch_size)
                # Fit training using batch data
                sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
                # Compute average loss
                avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})/total_batch
            # Display logs per epoch step
            if epoch % display_step == 0:
                print( "Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
        # Test model
        correct_prediction = tf.equal(tf.argmax(h, 1), tf.argmax(y, 1))
        # Calculate accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
== Retrospective ==
 * [서지혜]: The keyboard for pair programming was a good idea.

== Next time ==
 * Up to ML Week 4 (Neural Networks)

== See also ==