#!/usr/bin/env python
# coding: utf-8

# In[1]:

import tensorflow as tf

# In[2]:

hello = tf.constant("Hi")

# In[3]:

print(hello)

# In[4]:

# Tensor: the basic data type

# In[5]:

a = tf.constant(10)
b = tf.constant(2)
c = a + b

# In[6]:

print(c)

# In[7]:

# TensorFlow first builds a graph and then runs it in a session: the deferred (lazy) execution model.

# In[9]:

sess = tf.Session()
print(sess.run(c))

# In[11]:

print(sess.run(hello))

# In[12]:

sess.close()

# In[13]:

# A placeholder is a parameter: a slot for input values fed in at run time.

# In[15]:

X = tf.placeholder(tf.float32, [None, 3])
print(X)

# In[16]:

x_data = [[1, 2, 3], [4, 5, 6]]

# In[18]:

W = tf.Variable(tf.random_normal([3, 2]))
b = tf.Variable(tf.random_normal([2, 1]))

# In[19]:

expr = tf.matmul(X, W) + b

# In[23]:

sess = tf.Session()
sess.run(tf.global_variables_initializer())

print(x_data)
print(sess.run(W))
print(sess.run(b))
print(sess.run(expr, feed_dict={X: x_data}))  # feed_dict: the input values to use when running the graph
sess.close()

# In[24]:

x_data

# In[25]:

x_data = [1, 2, 3]
y_data = [1, 2, 3]

# In[26]:

W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

# In[27]:

# tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)

# In[28]:

b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

# In[29]:

X = tf.placeholder(tf.float32, name="X")

# In[31]:

Y = tf.placeholder(tf.float32, name="Y")

# In[33]:

hypothesis = W * X + b

# In[34]:

cost = tf.reduce_mean(tf.square(hypothesis - Y))

# In[35]:

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

# In[36]:

train_op = optimizer.minimize(cost)

# In[42]:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(10):
        _, cost_val = sess.run([train_op, cost], feed_dict={X: x_data, Y: y_data})
        print(step, cost_val, sess.run(W), sess.run(b))

    print("X: 5, Y:", sess.run(hypothesis, feed_dict={X: 5}))
    print("X: 2.5, Y:", sess.run(hypothesis, feed_dict={X: 2.5}))


# # 4. Implementing a Basic Neural Network

# In[43]:

import numpy as np

# In[44]:

x_data = np.array([[0, 0], [1, 0], [1, 1], [0, 0], [0, 0], [0, 1]])

# In[45]:

y_data = np.array([
    [1, 0, 0],
    [0, 1, 0],
    [0, 0, 1],
    [1, 0, 0],
    [1, 0, 0],
    [0, 0, 1]
])

# In[46]:

X = tf.placeholder(tf.float32)

# In[47]:

Y = tf.placeholder(tf.float32)

# In[69]:

W1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.))
W2 = tf.Variable(tf.random_uniform([10, 3], -1., 1.))

# In[70]:

b1 = tf.Variable(tf.zeros([10]))
b2 = tf.Variable(tf.zeros([3]))

# In[73]:

L1 = tf.add(tf.matmul(X, W1), b1)

# In[74]:

L1 = tf.nn.relu(L1)

# In[75]:

model = tf.add(tf.matmul(L1, W2), b2)

# In[77]:

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))

# In[82]:

# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(cost)

# In[83]:

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for step in range(100):
    sess.run(train_op, feed_dict={X: x_data, Y: y_data})

    if (step + 1) % 10 == 0:
        print(step + 1, sess.run(cost, feed_dict={X: x_data, Y: y_data}))

prediction = tf.argmax(model, axis=1)
target = tf.argmax(Y, axis=1)
print("Predicted:", sess.run(prediction, feed_dict={X: x_data}))
print("Actual:   ", sess.run(target, feed_dict={Y: y_data}))

is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print("Accuracy: {:.2f}".format(sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data})))
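# In[ ]:

# A minimal NumPy sketch (not part of the original notebook) of what the
# argmax / equal / cast / reduce_mean chain above computes: one-hot rows are
# reduced to class indices, compared elementwise, and averaged.
# All values below are made up for illustration.
logits_np = np.array([[2.0, 0.1, 0.3],   # hypothetical model outputs
                      [0.2, 1.5, 0.1],
                      [0.1, 0.2, 3.0]])
labels_np = np.array([[1, 0, 0],         # hypothetical one-hot labels
                      [0, 0, 1],
                      [0, 0, 1]])
pred_np = np.argmax(logits_np, axis=1)   # -> [0, 1, 2]
true_np = np.argmax(labels_np, axis=1)   # -> [0, 2, 2]
print("NumPy accuracy check:", np.mean(pred_np == true_np))  # 2 of 3 correct -> ~0.67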
# # 5. TensorBoard and Model Reuse

# In[116]:

data = np.loadtxt("./sample_data.csv", delimiter=',', unpack=True, dtype='float32')
x_data = np.transpose(data[0:2])
y_data = np.transpose(data[2:])

# In[117]:

data

# In[119]:

# tf.reset_default_graph()
global_step = tf.Variable(0, trainable=False, name='global_step')

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

with tf.name_scope('layer1'):
    W1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.), name='W1')
    L1 = tf.nn.relu(tf.matmul(X, W1))

with tf.name_scope('layer2'):
    W2 = tf.Variable(tf.random_uniform([10, 20], -1., 1.), name='W2')
    L2 = tf.nn.relu(tf.matmul(L1, W2))

with tf.name_scope('output'):
    W3 = tf.Variable(tf.random_uniform([20, 3], -1., 1.), name='W3')
    model = tf.matmul(L2, W3)

with tf.name_scope('optimizer'):
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))

    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    # The variable passed as global_step is incremented by one each time the
    # trainable variables are optimized, so it counts training steps.
    train_op = optimizer.minimize(cost, global_step=global_step)

# Register the value to collect so the loss can be tracked.
tf.summary.scalar('cost', cost)

sess = tf.Session()
# Initialize the API for saving and restoring the model.
# global_variables() selects the variables defined above as the ones to save or restore.
saver = tf.train.Saver(tf.global_variables())

ckpt = tf.train.get_checkpoint_state('./model')
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    saver.restore(sess, ckpt.model_checkpoint_path)
else:
    sess.run(tf.global_variables_initializer())

merged = tf.summary.merge_all()  # collect the summaries registered above
writer = tf.summary.FileWriter('./logs', sess.graph)  # directory for the graph and summary values

# Run the optimization.
for step in range(2):
    sess.run(train_op, feed_dict={X: x_data, Y: y_data})

    print("Step: {}".format(sess.run(global_step)),
          "Cost: {:.3f}".format(sess.run(cost, feed_dict={X: x_data, Y: y_data})))

    summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})
    writer.add_summary(summary, global_step=sess.run(global_step))

saver.save(sess, './model/dnn.ckpt', global_step=global_step)

# Check the results.
# 0: other, 1: mammal, 2: bird
prediction = tf.argmax(model, 1)
target = tf.argmax(Y, 1)
print('Predicted:', sess.run(prediction, feed_dict={X: x_data}))
print('Actual:   ', sess.run(target, feed_dict={Y: y_data}))

is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('Accuracy: {:.2f}'.format(sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data})))

# In[95]:

# tf.Variable(initial_value=None, trainable=True, collections=None, validate_shape=True,
#             caching_device=None, name=None, variable_def=None, dtype=None,
#             expected_shape=None, import_scope=None)

# [TensorFlow Serving homepage](https://tensorflow.github.io/serving/)

# In[2]:

from tensorflow.examples.tutorials.mnist import input_data

# In[3]:

mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)

# In[4]:

X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# In[ ]:

W1 = tf.Variable(tf.random_normal([784]))
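# In[ ]:

# The cell above ends mid-definition. A minimal sketch of how the MNIST
# network could continue, assuming two 256-unit hidden layers; the layer
# sizes and the Adam learning rate are assumptions, not from the original.
W1 = tf.Variable(tf.random_normal([784, 256], stddev=0.01))  # assumed hidden size
L1 = tf.nn.relu(tf.matmul(X, W1))

W2 = tf.Variable(tf.random_normal([256, 256], stddev=0.01))  # assumed hidden size
L2 = tf.nn.relu(tf.matmul(L1, W2))

W3 = tf.Variable(tf.random_normal([256, 10], stddev=0.01))   # 10 output classes
model = tf.matmul(L2, W3)

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)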