Tensorflow (2)

newly0513 · 2019. 5. 8. 10:35

MNIST Practice

Practice 1.

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Load the MNIST dataset with one-hot encoded labels
mnist = input_data.read_data_sets("/.mnist/data/", one_hot=True)

# Placeholders: 28x28 images flattened to 784 values, 10 output classes
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])

# Hidden layer 1: 784 -> 256, ReLU
W1 = tf.Variable(tf.random_normal([784, 256], stddev=0.01))
L1 = tf.nn.relu(tf.matmul(X, W1))

# Hidden layer 2: 256 -> 256, ReLU
W2 = tf.Variable(tf.random_normal([256, 256], stddev=0.01))
L2 = tf.nn.relu(tf.matmul(L1, W2))

# Output layer: 256 -> 10 (raw logits; softmax is applied inside the loss)
W3 = tf.Variable(tf.random_normal([256, 10], stddev=0.01))
model = tf.matmul(L2, W3)

# Softmax cross-entropy loss, minimized with Adam
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

batch_size = 100
total_batch = int(mnist.train.num_examples / batch_size)  # 55000 / 100 = 550 batches per epoch

for epoch in range(15):
	total_cost = 0
	for i in range(total_batch):
		batch_xs, batch_ys = mnist.train.next_batch(batch_size)
		_, cost_val = sess.run([optimizer, cost], feed_dict={X: batch_xs, Y: batch_ys})
		total_cost += cost_val
	print('Epoch:', '%04d' % (epoch + 1), 'Avg. cost =', '{:3f}'.format(total_cost / total_batch))
print('Optimization complete!')

# Accuracy: fraction of test images whose argmax prediction matches the label
is_correct = tf.equal(tf.argmax(model, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels}))
                    
Extracting /.mnist/data/train-images-idx3-ubyte.gz
Extracting /.mnist/data/train-labels-idx1-ubyte.gz
Extracting /.mnist/data/t10k-images-idx3-ubyte.gz
Extracting /.mnist/data/t10k-labels-idx1-ubyte.gz
Epoch: 0001 Avg. cost = 0.404756
Epoch: 0002 Avg. cost = 0.152573
Epoch: 0003 Avg. cost = 0.096750
Epoch: 0004 Avg. cost = 0.071138
Epoch: 0005 Avg. cost = 0.051414
Epoch: 0006 Avg. cost = 0.041225
Epoch: 0007 Avg. cost = 0.029426
Epoch: 0008 Avg. cost = 0.023973
Epoch: 0009 Avg. cost = 0.021442
Epoch: 0010 Avg. cost = 0.017002
Epoch: 0011 Avg. cost = 0.014556
Epoch: 0012 Avg. cost = 0.013162
Epoch: 0013 Avg. cost = 0.013430
Epoch: 0014 Avg. cost = 0.012036
Epoch: 0015 Avg. cost = 0.010803
Optimization complete!
Accuracy: 0.9804
Source: 골빈해커의 3분 딥러닝 (Golbin's 3-Minute Deep Learning)
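
As a quick check beyond the book's listing, here is a minimal sketch (my addition; it assumes the sess, model, and X defined above are still live) that classifies a single test image:

import numpy as np

# Take one test image (already flattened to 784 values, with a batch dimension of 1)
sample = mnist.test.images[:1]
# Run the network forward; `model` holds raw logits, so argmax gives the predicted class
logits = sess.run(model, feed_dict={X: sample})
print('Predicted digit:', np.argmax(logits, axis=1)[0])
print('Actual digit:', np.argmax(mnist.test.labels[0]))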

Practice 2. Fashion MNIST

from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras

import numpy as np
import matplotlib.pyplot as plt

# Load the Fashion MNIST dataset
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# Inspect the data (REPL-style: these lines just display shapes and labels)
train_images.shape    # (60000, 28, 28)
len(train_labels)     # 60000
train_labels
test_images.shape     # (10000, 28, 28)
len(test_labels)      # 10000

# Preprocess the data: pixel values run 0-255, so scale them to the 0-1 range
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()

train_images = train_images / 255.0
test_images = test_images / 255.0

plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()

# Build the model: flatten 28x28 images, one hidden ReLU layer, softmax output
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28,28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])

# Compile the model (sparse_categorical_crossentropy expects integer labels)
model.compile(optimizer='adam',
             loss='sparse_categorical_crossentropy',
             metrics=['accuracy'])

# Train the model
model.fit(train_images, train_labels, epochs=5)

# Evaluate accuracy on the test set
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)

# Make predictions; each row of `predictions` is a 10-way probability vector
predictions = model.predict(test_images)
predictions[0]               # probabilities for the first test image
np.argmax(predictions[0])    # predicted class index
test_labels[0]               # true class index
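
A small sketch (my addition, using only names already defined above) that prints the top prediction in readable form:

idx = 0
pred = np.argmax(predictions[idx])
print('Predicted: {} ({:.1f}%), actual: {}'.format(
    class_names[pred],
    100 * np.max(predictions[idx]),
    class_names[test_labels[idx]]))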

# Plotting helpers: show an image with its prediction, and the class probability bars
def plot_image(i, predictions_array, true_label, img):
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(img, cmap=plt.cm.binary)
    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'
    
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                        100*np.max(predictions_array),
                                        class_names[true_label]),
              color=color)
    
def plot_value_array(i, predictions_array, true_label):
    predictions_array, true_label = predictions_array[i], true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    thisplot = plt.bar(range(10), predictions_array, color="#777777")
    plt.ylim([0,1])
    predicted_label = np.argmax(predictions_array)

    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')
    
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plt.show()

i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plt.show()

num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, test_labels)
plt.show()

# Predict on a single image; the model expects a batch, so add a batch dimension
img = np.expand_dims(test_images[0], 0)   # shape: (1, 28, 28)
predictions_single = model.predict(img)

plot_value_array(0, predictions_single, test_labels)
plt.xticks(range(10), class_names, rotation=45)
plt.show()

prediction_result = np.argmax(predictions_single[0])
print(prediction_result)
    

Epoch 1/5
60000/60000 [==============================] - 3s 54us/sample - loss: 0.4980 - acc: 0.8261
Epoch 2/5
60000/60000 [==============================] - 3s 50us/sample - loss: 0.3744 - acc: 0.8650
Epoch 3/5
60000/60000 [==============================] - 3s 51us/sample - loss: 0.3355 - acc: 0.8778
Epoch 4/5
60000/60000 [==============================] - 3s 51us/sample - loss: 0.3121 - acc: 0.8864
Epoch 5/5
60000/60000 [==============================] - 3s 52us/sample - loss: 0.2939 - acc: 0.8913
10000/10000 [==============================] - 0s 32us/sample - loss: 0.3629 - acc: 0.8680
Test accuracy: 0.868

Source: https://www.tensorflow.org/tutorials/keras/basic_classification

Practice 3. Movie Reviews (IMDB)

from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras

import numpy as np

# Download the IMDB dataset, keeping only the 10,000 most frequent words
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)

# Inspect the data
print("Training samples: {}, labels: {}".format(len(train_data), len(train_labels)))
print(train_data[0])
len(train_data[0]), len(train_data[1])   # reviews have different lengths

# Build a word -> index mapping, shifting indices by 3 to reserve special tokens
word_index = imdb.get_word_index()
word_index = {k: (v + 3) for k, v in word_index.items()}
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2  # unknown
word_index["<UNUSED>"] = 3
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])

def decode_review(text):
    return ' '.join([reverse_word_index.get(i, '?') for i in text])

decode_review(train_data[0])
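
The +3 shift matters because Keras assigns raw index 1 to the most frequent word; after shifting, real words start at index 4. A quick sanity check (my addition):

print(reverse_word_index[4])   # expected: 'the' (the most frequent word)
print(reverse_word_index[1])   # expected: '<START>'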

# Prepare the data: pad every review to length 256 with the <PAD> token
train_data = keras.preprocessing.sequence.pad_sequences(train_data,
                                                        value=word_index["<PAD>"],
                                                        padding='post',
                                                        maxlen=256)

test_data = keras.preprocessing.sequence.pad_sequences(test_data,
                                                       value=word_index["<PAD>"],
                                                       padding='post',
                                                       maxlen=256)
len(train_data[0]), len(train_data[1])   # now both 256
print(train_data[0])

# Build the model
vocab_size = 10000
model = keras.Sequential()
# Embedding: map each word index to a 16-dimensional vector
model.add(keras.layers.Embedding(vocab_size, 16, input_shape=(None,)))
# Average the word vectors over the sequence, giving a fixed-size representation
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
# Sigmoid output: probability that the review is positive
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()

model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='binary_crossentropy',
              metrics=['acc'])

# Hold out the first 10,000 reviews as a validation set
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]

# Train the model
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=40,
                    batch_size=512,
                    validation_data=(x_val, y_val),
                    verbose=1)

# Evaluate the model on the test set
results = model.evaluate(test_data, test_labels)
print(results)
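
model.evaluate returns the metrics in compile order, here [loss, accuracy]. A small sketch (my addition) for a friendlier printout:

print('Test loss: {:.4f}, test accuracy: {:.4f}'.format(results[0], results[1]))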

# Plot the training history
history_dict = history.history
history_dict.keys()

import matplotlib.pyplot as plt

acc = history_dict['acc']
val_acc = history_dict['val_acc']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()

plt.clf()   # clear the figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()

plt.show()
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz
17465344/17464789 [==============================] - 2s 0us/step
Training samples: 25000, labels: 25000
[1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100, 43, 838, 112, 50, 670, 2, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 2, 336, 385, 39, 4, 172, 4536, 1111, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2025, 19, 14, 22, 4, 1920, 4613, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 1247, 4, 22, 17, 515, 17, 12, 16, 626, 18, 2, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2223, 5244, 16, 480, 66, 3785, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 1415, 33, 6, 22, 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 2, 8, 4, 107, 117, 5952, 15, 256, 4, 2, 7, 3766, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 2, 1029, 13, 104, 88, 4, 381, 15, 297, 98, 32, 2071, 56, 26, 141, 6, 194, 7486, 18, 4, 226, 22, 21, 134, 476, 26, 480, 5, 144, 30, 5535, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 1334, 88, 12, 16, 283, 5, 16, 4472, 113, 103, 32, 15, 16, 5345, 19, 178, 32]
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb_word_index.json
1646592/1641221 [==============================] - 1s 0us/step
[   1   14   22   16   43  530  973 1622 1385   65  458 4468   66 3941
    4  173   36  256    5   25  100   43  838  112   50  670    2    9
   35  480  284    5  150    4  172  112  167    2  336  385   39    4
  172 4536 1111   17  546   38   13  447    4  192   50   16    6  147
 2025   19   14   22    4 1920 4613  469    4   22   71   87   12   16
   43  530   38   76   15   13 1247    4   22   17  515   17   12   16
  626   18    2    5   62  386   12    8  316    8  106    5    4 2223
 5244   16  480   66 3785   33    4  130   12   16   38  619    5   25
  124   51   36  135   48   25 1415   33    6   22   12  215   28   77
   52    5   14  407   16   82    2    8    4  107  117 5952   15  256
    4    2    7 3766    5  723   36   71   43  530  476   26  400  317
   46    7    4    2 1029   13  104   88    4  381   15  297   98   32
 2071   56   26  141    6  194 7486   18    4  226   22   21  134  476
   26  480    5  144   30 5535   18   51   36   28  224   92   25  104
    4  226   65   16   38 1334   88   12   16  283    5   16 4472  113
  103   32   15   16 5345   19  178   32    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0    0    0    0    0    0    0    0    0    0    0
    0    0    0    0]
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
embedding (Embedding)        (None, None, 16)          160000    
_________________________________________________________________
global_average_pooling1d (Gl (None, 16)                0         
_________________________________________________________________
dense_14 (Dense)             (None, 16)                272       
_________________________________________________________________
dense_15 (Dense)             (None, 1)                 17        
=================================================================
Total params: 160,289
Trainable params: 160,289
Non-trainable params: 0
_________________________________________________________________
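
The parameter counts check out: the embedding holds 10,000 x 16 = 160,000 weights, the 16-unit Dense layer has 16 x 16 weights + 16 biases = 272, and the output layer has 16 x 1 + 1 = 17, for 160,289 trainable parameters in total.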
Train on 15000 samples, validate on 10000 samples
WARNING:tensorflow:From C:\Users\KIH\Anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Epoch 1/40
15000/15000 [==============================] - 1s 74us/sample - loss: 0.6919 - acc: 0.5193 - val_loss: 0.6901 - val_acc: 0.5434
Epoch 2/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.6867 - acc: 0.6275 - val_loss: 0.6835 - val_acc: 0.6546
Epoch 3/40
15000/15000 [==============================] - 1s 58us/sample - loss: 0.6760 - acc: 0.7101 - val_loss: 0.6701 - val_acc: 0.7334
Epoch 4/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.6573 - acc: 0.7389 - val_loss: 0.6488 - val_acc: 0.7629
Epoch 5/40
15000/15000 [==============================] - 1s 65us/sample - loss: 0.6294 - acc: 0.7821 - val_loss: 0.6193 - val_acc: 0.7817
Epoch 6/40
15000/15000 [==============================] - 1s 57us/sample - loss: 0.5928 - acc: 0.8081 - val_loss: 0.5833 - val_acc: 0.7957
Epoch 7/40
15000/15000 [==============================] - 1s 55us/sample - loss: 0.5501 - acc: 0.8227 - val_loss: 0.5424 - val_acc: 0.8148
Epoch 8/40
15000/15000 [==============================] - 1s 54us/sample - loss: 0.5054 - acc: 0.8417 - val_loss: 0.5027 - val_acc: 0.8276
Epoch 9/40
15000/15000 [==============================] - 1s 57us/sample - loss: 0.4619 - acc: 0.8554 - val_loss: 0.4651 - val_acc: 0.8393
Epoch 10/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.4220 - acc: 0.8693 - val_loss: 0.4327 - val_acc: 0.8463
Epoch 11/40
15000/15000 [==============================] - 1s 54us/sample - loss: 0.3873 - acc: 0.8767 - val_loss: 0.4051 - val_acc: 0.8547
Epoch 12/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.3575 - acc: 0.8860 - val_loss: 0.3838 - val_acc: 0.8575
Epoch 13/40
15000/15000 [==============================] - 1s 57us/sample - loss: 0.3330 - acc: 0.8915 - val_loss: 0.3642 - val_acc: 0.8648
Epoch 14/40
15000/15000 [==============================] - 1s 55us/sample - loss: 0.3110 - acc: 0.8971 - val_loss: 0.3500 - val_acc: 0.8689
Epoch 15/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.2926 - acc: 0.9010 - val_loss: 0.3373 - val_acc: 0.8709
Epoch 16/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.2754 - acc: 0.9062 - val_loss: 0.3266 - val_acc: 0.8738
Epoch 17/40
15000/15000 [==============================] - 1s 55us/sample - loss: 0.2594 - acc: 0.9111 - val_loss: 0.3177 - val_acc: 0.8755
Epoch 18/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.2451 - acc: 0.9158 - val_loss: 0.3099 - val_acc: 0.8788
Epoch 19/40
15000/15000 [==============================] - 1s 55us/sample - loss: 0.2324 - acc: 0.9194 - val_loss: 0.3037 - val_acc: 0.8805
Epoch 20/40
15000/15000 [==============================] - 1s 55us/sample - loss: 0.2211 - acc: 0.9230 - val_loss: 0.2993 - val_acc: 0.8802
Epoch 21/40
15000/15000 [==============================] - 1s 55us/sample - loss: 0.2099 - acc: 0.9275 - val_loss: 0.2956 - val_acc: 0.8806
Epoch 22/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.2004 - acc: 0.9296 - val_loss: 0.2924 - val_acc: 0.8829
Epoch 23/40
15000/15000 [==============================] - 1s 55us/sample - loss: 0.1911 - acc: 0.9341 - val_loss: 0.2908 - val_acc: 0.8829
Epoch 24/40
15000/15000 [==============================] - 1s 58us/sample - loss: 0.1830 - acc: 0.9379 - val_loss: 0.2887 - val_acc: 0.8837
Epoch 25/40
15000/15000 [==============================] - 1s 57us/sample - loss: 0.1748 - acc: 0.9421 - val_loss: 0.2869 - val_acc: 0.8842
Epoch 26/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.1675 - acc: 0.9447 - val_loss: 0.2873 - val_acc: 0.8829
Epoch 27/40
15000/15000 [==============================] - 1s 54us/sample - loss: 0.1606 - acc: 0.9477 - val_loss: 0.2865 - val_acc: 0.8840
Epoch 28/40
15000/15000 [==============================] - 1s 55us/sample - loss: 0.1540 - acc: 0.9515 - val_loss: 0.2866 - val_acc: 0.8847
Epoch 29/40
15000/15000 [==============================] - 1s 56us/sample - loss: 0.1483 - acc: 0.9541 - val_loss: 0.2881 - val_acc: 0.8831
Epoch 30/40
15000/15000 [==============================] - 1s 57us/sample - loss: 0.1423 - acc: 0.9557 - val_loss: 0.2873 - val_acc: 0.8850
Epoch 31/40
15000/15000 [==============================] - 1s 58us/sample - loss: 0.1363 - acc: 0.9593 - val_loss: 0.2883 - val_acc: 0.8862
Epoch 32/40
15000/15000 [==============================] - 1s 59us/sample - loss: 0.1309 - acc: 0.9609 - val_loss: 0.2896 - val_acc: 0.8860
Epoch 33/40
15000/15000 [==============================] - 1s 58us/sample - loss: 0.1256 - acc: 0.9629 - val_loss: 0.2918 - val_acc: 0.8859
Epoch 34/40
15000/15000 [==============================] - 1s 58us/sample - loss: 0.1210 - acc: 0.9648 - val_loss: 0.2936 - val_acc: 0.8853
Epoch 35/40
15000/15000 [==============================] - 1s 58us/sample - loss: 0.1168 - acc: 0.9653 - val_loss: 0.2961 - val_acc: 0.8860
Epoch 36/40
15000/15000 [==============================] - 1s 59us/sample - loss: 0.1121 - acc: 0.9685 - val_loss: 0.2980 - val_acc: 0.8851
Epoch 37/40
15000/15000 [==============================] - 1s 59us/sample - loss: 0.1076 - acc: 0.9693 - val_loss: 0.3004 - val_acc: 0.8846
Epoch 38/40
15000/15000 [==============================] - 1s 58us/sample - loss: 0.1035 - acc: 0.9706 - val_loss: 0.3040 - val_acc: 0.8835
Epoch 39/40
15000/15000 [==============================] - 1s 59us/sample - loss: 0.1002 - acc: 0.9720 - val_loss: 0.3071 - val_acc: 0.8833
Epoch 40/40
15000/15000 [==============================] - 1s 58us/sample - loss: 0.0961 - acc: 0.9741 - val_loss: 0.3096 - val_acc: 0.8842
25000/25000 [==============================] - 1s 24us/sample - loss: 0.3308 - acc: 0.8720
[0.3307978550672531, 0.87204]
