# TensorFlow 1.x basics.
# A Tensor is TF's symbolic data type: printing it shows graph metadata,
# not a value — values are only produced by running a Session.
import tensorflow as tf

hello = tf.constant("Hi")
print(hello)
# Output: Tensor("Const:0", shape=(), dtype=string)
# Building `c = a + b` only adds nodes to the graph; nothing is computed yet.
a = tf.constant(10)
b = tf.constant(2)
c = a + b
print(c)
# Output: Tensor("add:0", shape=(), dtype=int32)

# TensorFlow builds the graph first and executes it later (deferred
# execution), so a Session is needed to obtain concrete values.
sess = tf.Session()
print(sess.run(c))      # Output: 12
print(sess.run(hello))  # Output: b'Hi'
sess.close()
# A placeholder is a graph input (parameter) whose value is fed at run time.
X = tf.placeholder(tf.float32, [None, 3])  # None = any batch size
print(X)
# Output: Tensor("Placeholder_1:0", shape=(?, 3), dtype=float32)

x_data = [[1, 2, 3], [4, 5, 6]]
W = tf.Variable(tf.random_normal([3, 2]))
b = tf.Variable(tf.random_normal([2, 1]))
expr = tf.matmul(X, W) + b  # (2,3) @ (3,2) + (2,1) -> broadcast to (2,2)

sess = tf.Session()
# Variables must be initialized before they can be read.
sess.run(tf.global_variables_initializer())
print(x_data)
print(sess.run(W))
print(sess.run(b))
# feed_dict supplies the input values used when executing the graph.
print(sess.run(expr, feed_dict={X: x_data}))
sess.close()
# Sample output:
# [[1, 2, 3], [4, 5, 6]]
# [[-0.72649789 -0.31295726] [ 1.18099403 -0.71925688] [-1.98263812 -2.10048032]]
# [[-1.28041577] [-1.60697615]]
# [[ -5.59283924  -9.33332729] [-10.50382614 -19.05797195]]
# Simple linear regression: fit hypothesis = W*X + b to the identity data.
x_data = [1, 2, 3]
y_data = [1, 2, 3]

# tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

X = tf.placeholder(tf.float32, name="X")
Y = tf.placeholder(tf.float32, name="Y")

hypothesis = W * X + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))  # mean squared error
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10):
        _, cost_val = sess.run([train_op, cost],
                               feed_dict={X: x_data, Y: y_data})
        print(step, cost_val, sess.run(W), sess.run(b))
    print("X: 5, Y:", sess.run(hypothesis, feed_dict={X: 5}))
    # BUG FIX: the label previously said "X: 5" while feeding X=2.5.
    print("X: 2.5, Y:", sess.run(hypothesis, feed_dict={X: 2.5}))
# Sample output:
# 0 23.2728 [ 1.17120934] [ 0.16678452]
# 1 0.27883 [ 0.94470012] [ 0.06494389]
# ...
# 9 0.000570862 [ 0.9729172] [ 0.06156565]
# X: 5, Y: [ 4.92615175]
# X: 2.5, Y: [ 2.49385858]
import numpy as np

# Toy classifier: 2 input features -> one of 3 classes (one-hot labels).
x_data = np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])
y_data = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
    [1, 0, 0],
    [0, 0, 1],
])

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Two-layer network: 2 -> 10 (ReLU) -> 3 (logits).
W1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.))
W2 = tf.Variable(tf.random_uniform([10, 3], -1., 1.))
b1 = tf.Variable(tf.zeros([10]))
b2 = tf.Variable(tf.zeros([3]))

L1 = tf.add(tf.matmul(X, W1), b1)
L1 = tf.nn.relu(L1)
model = tf.add(tf.matmul(L1, W2), b2)

# softmax_cross_entropy_with_logits expects raw logits, not softmax output.
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(cost)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(100):
    sess.run(train_op, feed_dict={X: x_data, Y: y_data})
    if (step + 1) % 10 == 0:  # report the loss every 10 steps
        print(step + 1, sess.run(cost, feed_dict={X: x_data, Y: y_data}))

# argmax over the class axis turns one-hot / logits into class indices.
prediction = tf.argmax(model, axis=1)
target = tf.argmax(Y, axis=1)
print("예측값: ", sess.run(prediction, feed_dict={X: x_data}))
print("실제값: ", sess.run(target, feed_dict={Y: y_data}))
is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print("정확도: {:.2f}".format(sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data})))
# Sample output:
# 10 0.820693  20 0.639553  ...  100 0.0724157
# 예측값: [0 1 2 0 0 2]
# 실제값: [0 1 2 0 0 2]
# 정확도: 100.00
# Load the sample CSV; unpack=True transposes so each row of `data` is one
# column of the file.
data = np.loadtxt("./sample_data.csv", delimiter=',', unpack=True, dtype='float32')
x_data = np.transpose(data[0:2])  # first two columns: features
y_data = np.transpose(data[2:])   # remaining columns: one-hot labels
# Sample `data`:
# array([[ 0., 1., 1., 0., 0., 0.],
#        [ 0., 0., 1., 0., 0., 1.],
#        [ 1., 0., 0., 1., 1., 0.],
#        [ 0., 1., 0., 0., 0., 0.],
#        [ 0., 0., 1., 0., 0., 1.]], dtype=float32)
# Deeper model with checkpointing (tf.train.Saver) and TensorBoard summaries.
# tf.reset_default_graph()

# global_step counts optimization steps; trainable=False keeps it out of the
# optimizer's variable list.
global_step = tf.Variable(0, trainable=False, name='global_step')

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# name_scope groups the nodes so they render as one box in TensorBoard.
with tf.name_scope('layer1'):
    W1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.), name='W1')
    L1 = tf.nn.relu(tf.matmul(X, W1))

with tf.name_scope('layer2'):
    W2 = tf.Variable(tf.random_uniform([10, 20], -1., 1.), name='W2')
    L2 = tf.nn.relu(tf.matmul(L1, W2))

with tf.name_scope('output'):
    W3 = tf.Variable(tf.random_uniform([20, 3], -1., 1.), name='W3')
    model = tf.matmul(L2, W3)

with tf.name_scope('optimizer'):
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    # Passing global_step makes minimize() increment it once per step.
    train_op = optimizer.minimize(cost, global_step=global_step)

# Register the loss value for collection into TensorBoard summaries.
tf.summary.scalar('cost', cost)

sess = tf.Session()
# Saver saves/restores the variables defined above;
# tf.global_variables() selects them all.
saver = tf.train.Saver(tf.global_variables())

# Resume from the latest checkpoint if one exists, otherwise start fresh.
ckpt = tf.train.get_checkpoint_state('./model')
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    saver.restore(sess, ckpt.model_checkpoint_path)
else:
    sess.run(tf.global_variables_initializer())

merged = tf.summary.merge_all()                       # gather registered summaries
writer = tf.summary.FileWriter('./logs', sess.graph)  # where graph/summaries go

# Training loop.
# NOTE(review): indentation was lost in this paste; the summary write is
# reconstructed as part of the loop body (one summary per step) — confirm
# against the original notebook.
for step in range(2):
    sess.run(train_op, feed_dict={X: x_data, Y: y_data})
    print("Step: {}".format(sess.run(global_step)),
          "Cost: {:.3f}".format(sess.run(cost, feed_dict={X: x_data, Y: y_data})))
    summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})
    writer.add_summary(summary, global_step=sess.run(global_step))

saver.save(sess, './model/dnn.ckpt', global_step=global_step)

# Check results.  Classes: 0 = other, 1 = mammal, 2 = bird
prediction = tf.argmax(model, 1)
target = tf.argmax(Y, 1)
print('예측값:', sess.run(prediction, feed_dict={X: x_data}))
print('실제값:', sess.run(target, feed_dict={Y: y_data}))
is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('정확도: {:.2f}'.format(sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data})))
# Sample output:
# Step: 1 Cost: 1.152
# Step: 2 Cost: 1.045
# 예측값: [0 1 1 0 0 2]
# 실제값: [0 1 2 0 0 2]
# 정확도: 83.33

# Reference signature:
# tf.Variable(initial_value=None, trainable=True, collections=None,
#             validate_shape=True, caching_device=None, name=None,
#             variable_def=None, dtype=None, expected_shape=None,
#             import_scope=None)
# MNIST: 28x28 grayscale digit images flattened to 784 features, 10 classes,
# one-hot labels.
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)
# Output: downloads and extracts the four MNIST .gz archives into ./mnist/data/

X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
# NOTE(review): a first-layer weight of shape [784] cannot matmul with X of
# shape [None, 784]; this presumably should be a matrix like [784, 256].
# The rest of the layer lies outside this chunk, so the line is left as-is.
W1 = tf.Variable(tf.random_normal([784]))