머신러닝스터디/2016/2016_05_28

Contents

  • Let's build basic logic gates!
    • AND, OR, NXOR, XOR

Code

import tensorflow as tf
# AND          OR           NXOR          XOR
# (0, 0) => 0  (0, 0) => 0  (0, 0) => 1  (0, 0) => 0
# (0, 1) => 0  (0, 1) => 1  (0, 1) => 0  (0, 1) => 1
# (1, 0) => 0  (1, 0) => 1  (1, 0) => 0  (1, 0) => 1
# (1, 1) => 1  (1, 1) => 1  (1, 1) => 1  (1, 1) => 0

# Weights and biases for a 2-2-1 network: 2 inputs -> 2 hidden units -> 1 output
W1 = tf.Variable(tf.random_uniform([2, 2]))
b1 = tf.Variable(tf.random_uniform([2]))

W2 = tf.Variable(tf.random_uniform([2, 1]))
b2 = tf.Variable(tf.random_uniform([1]))

def logic_gate(x):
    hidden = tf.sigmoid(tf.matmul(x, W1) + b1)
    return tf.sigmoid(tf.matmul(hidden, W2) + b2)

x = tf.placeholder("float", [None, 2])
y = tf.placeholder("float", [None, 1])

value = logic_gate(x)
# loss = tf.reduce_sum(tf.pow(y - value, 2))  # squared-error loss, kept for reference
# Binary cross-entropy loss, averaged over the four training examples
loss = - tf.reduce_mean(y*tf.log(value) + (1-y)*tf.log(1-value))
optimize = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

init = tf.initialize_all_variables()  # renamed tf.global_variables_initializer() in later TF 1.x releases

with tf.Session() as sess:
    sess.run(init)
    for i in range(30001):
        # y = [[1], [0], [0], [1]] is the NXOR column of the truth table above
        sess.run(optimize, feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]})
        if i % 1000 == 0:
            print("Epoch:", i)
            print(sess.run([value, loss], feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: [[1], [0], [0], [1]]}))
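
The feed_dict above trains only the NXOR targets. To try the other gates listed under Contents, only the y targets need to change; a minimal sketch (the targets dict name is illustrative, not part of the session code), taken straight from the truth-table comments:

# Target columns from the truth-table comments above; reuse the same x inputs.
targets = {
    "AND":  [[0], [0], [0], [1]],
    "OR":   [[0], [1], [1], [1]],
    "NXOR": [[1], [0], [0], [1]],
    "XOR":  [[0], [1], [1], [0]],
}
# e.g. feed_dict={x: [[0, 0], [0, 1], [1, 0], [1, 1]], y: targets["XOR"]}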

Retrospective

Next time

  • ML Week 5 Back Propagation hands-on practice (a rough NumPy gradient sketch for this network follows below)
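
As a preview, here is a minimal NumPy sketch (not part of the session notes; variable names and the seed are illustrative) of the gradients that GradientDescentOptimizer computes automatically for the same 2-2-1 sigmoid network and cross-entropy loss:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Same shapes as the TensorFlow code: 2 inputs -> 2 hidden units -> 1 output
rng = np.random.default_rng(0)
W1, b1 = rng.uniform(size=(2, 2)), rng.uniform(size=2)
W2, b2 = rng.uniform(size=(2, 1)), rng.uniform(size=1)

x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[1], [0], [0], [1]], dtype=float)  # NXOR targets
lr = 0.01

for i in range(30001):
    # Forward pass, mirroring logic_gate() above
    hidden = sigmoid(x @ W1 + b1)
    value = sigmoid(hidden @ W2 + b2)

    # Backward pass: gradients of the mean cross-entropy loss
    d_z2 = (value - y) / len(x)              # dL/d(output pre-activation)
    d_W2 = hidden.T @ d_z2
    d_b2 = d_z2.sum(axis=0)
    d_hidden = d_z2 @ W2.T
    d_z1 = d_hidden * hidden * (1 - hidden)  # sigmoid derivative
    d_W1 = x.T @ d_z1
    d_b1 = d_z1.sum(axis=0)

    # Gradient descent update
    W1 -= lr * d_W1
    b1 -= lr * d_b1
    W2 -= lr * d_W2
    b2 -= lr * d_b2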

See also


