temp 2020. 6. 1. 23:35 by solstyle

# fashion mnist

# Build a 4-layer neural network

 

import tensorflow as tf

 

# Prepare the data

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()

 

# Reshape: flatten each 28x28 image to a 784-vector

x_train = x_train.reshape(-1, 28 * 28)

x_test = x_test.reshape(-1, 28 * 28)

 

y_train = tf.keras.utils.to_categorical(y_train)

y_test = tf.keras.utils.to_categorical(y_test)

 

print(x_train.shape, y_train.shape)

print(x_test.shape, y_test.shape)

 

tf.reset_default_graph()

tf.random.set_random_seed(1)

 

X = tf.placeholder(tf.float32, shape=[None, 784])

Y = tf.placeholder(tf.float32, shape=[None, 10])

kp = tf.placeholder(tf.float32)
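# (kp is the dropout keep probability: fed as 0.8 during training and 1.0 at evaluation)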

 

# Build layer 1

W1 = tf.Variable(tf.random_normal(shape=[784, 256]))

B1 = tf.Variable(tf.random_normal(shape=[256]))

H1 = tf.matmul(X, W1) + B1

H1 = tf.contrib.layers.batch_norm(H1)

H1 = tf.nn.relu(H1)

H1 = tf.nn.dropout(H1, keep_prob=kp)

# H1 shape = [None, 256]

 

# Build layer 2

W2 = tf.Variable(tf.random_normal(shape=[256, 256]))

B2 = tf.Variable(tf.random_normal(shape=[256]))

H2 = tf.matmul(H1, W2) + B2

H2 = tf.contrib.layers.batch_norm(H2)

H2 = tf.nn.relu(H2)

H2 = tf.nn.dropout(H2, keep_prob=kp)

 

# layer 3

W3 = tf.Variable(tf.random_normal(shape=[256, 128]))

B3 = tf.Variable(tf.random_normal(shape=[128]))

H3 = tf.matmul(H2, W3) + B3

H3 = tf.contrib.layers.batch_norm(H3)

H3 = tf.nn.relu(H3)

H3 = tf.nn.dropout(H3, keep_prob=kp)

 

# layer 4

W4 = tf.Variable(tf.random_normal(shape=[128, 10]))

B4 = tf.Variable(tf.random_normal(shape=[10]))

# H4 = tf.matmul(H3, W4) + B4

# H4 = tf.nn.relu(H4)

# H4 = tf.nn.dropout(H4, keep_prob=kp)

 

logit = tf.matmul(H3, W4) + B4

 

# loss

loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logit, labels=Y)

loss = tf.reduce_mean(loss)

 

pred = tf.nn.softmax(logit)

acc = tf.equal(tf.argmax(pred, axis=1), tf.argmax(Y, axis=1))

acc = tf.reduce_mean(tf.cast(acc, tf.float32))
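# acc: fraction of samples whose predicted class (argmax) matches the true label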

 

optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)

 

 

# Train the model

 

sess = tf.Session()

sess.run(tf.global_variables_initializer())

 

epochs = 5

batch = 256

n_batch = len(x_train) // batch
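# 60000 // 256 = 234 full batches per epoch; leftover samples are dropped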

 

for e in range(epochs):
    for b in range(n_batch):
        x = x_train[b * batch:(b + 1) * batch]
        y = y_train[b * batch:(b + 1) * batch]
        sess.run(optimizer, feed_dict={X: x, Y: y, kp: 0.8})

        if b % 20 == 0:
            print(sess.run([loss, acc], feed_dict={X: x, Y: y, kp: 1.0}))

 

accuracy = sess.run(acc, feed_dict={X: x_test, Y: y_test, kp: 1.0})

print("{0: .2f}%".format(accuracy * 100))

 

 

 

 

fashion mnist CNN (note: the code below actually loads CIFAR-10, and both conv layers use 16 filters)

  • conv1: 16 filters
  • pool
  • conv2: 16 filters
  • pool
  • fc

import tensorflow as tf

 

tf.reset_default_graph()

 

(x_train, y_train), (x_test, y_test) = \
    tf.keras.datasets.cifar10.load_data()

 

x_train = x_train / 255

x_test = x_test / 255

 

# X = (X - min) / (max - min)
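# A minimal equivalent sketch (pixels span 0..255, so min-max reduces to /255):
# x_train = (x_train - x_train.min()) / (x_train.max() - x_train.min())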

 

y_train = tf.keras.utils.to_categorical(y_train)

y_test = tf.keras.utils.to_categorical(y_test)

 

print(x_train.shape, y_train.shape)

 

tf.reset_default_graph()

# tf.random.set_random_seed(1)

 

# Prepare conv1

X = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])

Y = tf.placeholder(tf.float32, shape=[None, 10])

 

W1 = tf.Variable(tf.random_normal(shape=[3, 3, 3, 16]))

B1 = tf.Variable(tf.random_normal(shape=[16]))

H1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding="SAME")
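# "SAME" padding with stride 1 keeps the 32x32 spatial size; W1 gives 16 output channels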

H1 = tf.add(H1, B1)

H1 = tf.contrib.layers.batch_norm(H1)

HR1 = tf.nn.relu(H1)

 

# pool1

P1 = tf.nn.max_pool(HR1,
                    ksize=[1, 2, 2, 1],
                    strides=[1, 2, 2, 1],
                    padding="SAME")

 

# conv2

W2 = tf.Variable(tf.random_normal(shape=[3, 3, 16, 16]))

B2 = tf.Variable(tf.random_normal(shape=[16]))

H2 = tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding="SAME")

H2 = tf.add(H2, B2)

H2 = tf.contrib.layers.batch_norm(H2)

HR2 = tf.nn.relu(H2)

 

# pool2

P2 = tf.nn.max_pool(HR2,
                    ksize=[1, 2, 2, 1],
                    strides=[1, 2, 2, 1],
                    padding="SAME")

 

# flatten

flat = tf.reshape(P2, [-1, 8 * 8 * 16])
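# two 2x2 max-pools halve 32x32 twice: 32 -> 16 -> 8, leaving 8*8*16 features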

 

# fc layer

W3 = tf.Variable(tf.random_normal(shape=[8 * 8 * 16, 10]))

B3 = tf.Variable(tf.random_normal(shape=[10]))

logit = tf.matmul(flat, W3) + B3

 

# loss

loss = tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=logit, labels=Y)

loss = tf.reduce_mean(loss)

optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)

 

# acc

pred = tf.nn.softmax(logit)

acc = tf.equal(tf.argmax(pred, axis=1), tf.argmax(Y, axis=1))

acc = tf.reduce_mean(tf.cast(acc, tf.float32))



 

sess = tf.Session()

sess.run(tf.global_variables_initializer())

 

epochs = 10

batch = 256

n_batch = len(x_train) // batch

 

for e in range(epochs):
    for b in range(n_batch):
        x = x_train[b * batch:(b + 1) * batch]
        y = y_train[b * batch:(b + 1) * batch]
        sess.run(optimizer, feed_dict={X: x, Y: y})

        if b % 20 == 0:
            print(sess.run([loss, acc], feed_dict={X: x_train, Y: y_train}))

 

print("학습평가")

print(sess.run([loss, acc], feed_dict={X: x_test, Y: y_test}))

 

 

 

temp 2020. 6. 1. 23:31 by solstyle

RNN practice

  • Run time-series data through an RNN.

import tensorflow as tf

import numpy as np

 

# Prepare the data

file = "data-02-stock_daily.csv"

dataset = np.loadtxt(file, delimiter=",")

dataset = dataset[::-1]  # the CSV is ordered newest-first; reverse into chronological order

 

# Min-max normalization:
# subtract each column's min, then divide by that column's (max - min)

dataset = (dataset - dataset.min(axis=0)) / (dataset.max(axis=0) - dataset.min(axis=0))

 

x = np.array([dataset[i : i + 7] for i in range(len(dataset) - 7)])
y = np.array([dataset[i + 7, -1:] for i in range(len(dataset) - 7)])
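# e.g. x[0] = rows 0..6 (a 7-day window); y[0] = row 7's last column
# (assumed here to be the closing price)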

 

print(dataset.shape)

print(dataset[0])

print(x[0])

print(y[0])

print(x.shape)

print(y.shape)

print(y[-1])

 

x_train, x_test = x[:505], x[505:]

y_train, y_test = y[:505], y[505:]

print(x_train.shape, x_test.shape)

print(y_train.shape, y_test.shape)

 

 

# Build the graph

tf.reset_default_graph()

 

X = tf.placeholder(tf.float32, shape=[None, 7, 5])

Y = tf.placeholder(tf.float32, shape=[None, 1])

 

cell = tf.nn.rnn_cell.BasicLSTMCell(10)

output, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)  # builds the RNN; the state output is unused, so it goes to _

 

print(output.shape) #(?, 7, 10)



H = tf.reshape(output[:, -1, :], [-1, 10])  # take the last of the 7 time steps and reshape to 2-D

W = tf.Variable(tf.random_normal(shape=[10, 1]))
print(output[:, -1, :].shape)
print(H.shape)
B = tf.Variable(tf.random_normal(shape=[1]))
logit = tf.add(tf.matmul(H, W), B)

 

loss = tf.reduce_mean(tf.square(Y - logit)) # MSE
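# (mean squared error: the average of (Y - logit)^2 over the batch)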

optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)

 

 

sess = tf.Session()

sess.run(tf.global_variables_initializer())

 

for e in range(1000):
    sess.run(optimizer, feed_dict={X: x_train, Y: y_train})
    if e % 100 == 0:
        print(sess.run(loss, feed_dict={X: x_train, Y: y_train}))

 

pred = sess.run(logit, feed_dict={X: x_test})

 

import matplotlib.pyplot as plt

 

plt.plot(pred)

plt.plot(y_test)

 

MNIST with an RNN

  • Run classification data through an RNN.

import tensorflow as tf

 

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

 

x_train = x_train / 255

x_test = x_test / 255

 

y_train = tf.keras.utils.to_categorical(y_train)

y_test = tf.keras.utils.to_categorical(y_test)

print(x_train.shape)

print(y_train.shape)

# Build the graph

tf.reset_default_graph()

 

X = tf.placeholder(tf.float32, shape=[None, 28, 28])

Y = tf.placeholder(tf.float32, shape=[None, 10])

 

cell = tf.nn.rnn_cell.BasicLSTMCell(100)

#cell = tf.nn.rnn_cell.BasicLSTMCell(28)
#cell = tf.nn.rnn_cell.MultiRNNCell([cell] * 2)  # stacks the same cell twice; input sizes must match, so the cell above would need 28 units

output, _ = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
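# A minimal stacking sketch (an assumption, not part of the original run): build
# separate cells so the layer sizes can differ, then wrap them in MultiRNNCell:
#   cells = [tf.nn.rnn_cell.BasicLSTMCell(28), tf.nn.rnn_cell.BasicLSTMCell(100)]
#   cell = tf.nn.rnn_cell.MultiRNNCell(cells)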

 

# output shape => (None, 28, 100)

 

H = tf.reshape(output[:, -1, :], [-1, 100])

W = tf.Variable(tf.random_normal(shape=[100, 10]))

B = tf.Variable(tf.random_normal(shape=[10]))

logit = tf.add(tf.matmul(H, W), B)

 

loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logit, labels=Y)

loss = tf.reduce_mean(loss)

optimizer = tf.train.AdamOptimizer(0.01).minimize(loss)

 

pred = tf.nn.softmax(logit)

acc = tf.equal(tf.argmax(pred, axis=1), tf.argmax(Y, axis=1))

acc = tf.reduce_mean(tf.cast(acc, tf.float32))

sess = tf.Session()

sess.run(tf.global_variables_initializer())

 

epochs = 10

batch = 128

n_batch = len(x_train) // batch

 

for e in range(epochs):
    for b in range(n_batch):
        x = x_train[b * batch:(b + 1) * batch]
        y = y_train[b * batch:(b + 1) * batch]
        sess.run(optimizer, feed_dict={X: x, Y: y})

        if b % 20 == 0:
            print(sess.run(acc, feed_dict={X: x_train, Y: y_train}))

 

 

sess.run(acc, feed_dict={X: x_test, Y: y_test})

 

 

keras - MNIST

  • Two ways to use Keras, explained with a linear model.

from tensorflow import keras

import numpy as np

 

# Prepare the data

 

(x, y), (xx, yy) = keras.datasets.mnist.load_data()

x = x.reshape(-1, 28 * 28) / 255

xx = xx.reshape(-1, 28 * 28) / 255

 

# One-hot encoding: expand y into 10 columns of 0/1, one per class (0-9)

y = keras.utils.to_categorical(y)

yy = keras.utils.to_categorical(yy)

 

print(x.shape, y.shape)

print(xx.shape, yy.shape)

# Build the model

model = keras.models.Sequential()

# model.add(keras.layers.Dense(

# 10, input_shape=(28*28, ), activation=keras.activations.softmax

# ))

# Hidden layers use the ReLU activation function

model.add(keras.layers.Dense(128, input_shape=(784, )))

model.add(keras.layers.BatchNormalization())

model.add(keras.layers.Activation(keras.activations.relu))

model.add(keras.layers.Dropout(0.2))  # drop 20% of units while training; evaluation automatically uses all units

 

# The output layer uses the softmax activation function

 

model.add(keras.layers.Dense(10))  # only the first layer needs input_shape; later layers infer their input size

model.add(keras.layers.Activation(keras.activations.softmax))

 

# sigmoid is for binary classification; multi-class classification uses softmax
# relu is the activation used between hidden layers

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(lr=0.01),
              metrics=[keras.metrics.categorical_accuracy])

 

model.summary()

# For the commented-out single-layer model: W (784, 10) plus B (10) => 784*10 + 10 = 7850 params
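# For the model actually built above (as a check, based on the layer shapes):
#   Dense(128): 784*128 + 128 = 100,480 params
#   BatchNormalization: 4*128 = 512 params (256 of them trainable)
#   Dense(10): 128*10 + 10 = 1,290 params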

 

model.fit(x, y, epochs=10, batch_size=256)

 

# Evaluate

model.evaluate(xx,yy)

 

pred = model.predict(xx[1].reshape(-1, 784))
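# reshape one test image into a batch containing a single 784-vector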

 

import matplotlib.pyplot as plt

plt.imshow(xx[1].reshape(28, 28))

print(np.argmax(pred))

print(np.argmax(yy[1]))

 

 

 

 

fashion mnist keras

 

 

from tensorflow import keras

import numpy as np

 

# Prepare the data

 

(x, y), (xx, yy) = keras.datasets.fashion_mnist.load_data()

print(x.shape, y.shape)

 

 

x = x.reshape(-1, 28 * 28) / 255

xx = xx.reshape(-1, 28 * 28) / 255

 

# One-hot encoding: expand y into 10 columns of 0/1, one per class (0-9)

y = keras.utils.to_categorical(y)

yy = keras.utils.to_categorical(yy)

 

print(x.shape, y.shape)

print(xx.shape, yy.shape)

 

# Build the model

model = keras.models.Sequential()

# model.add(keras.layers.Dense(

# 10, input_shape=(28*28, ), activation=keras.activations.softmax

# ))

# Hidden layers use the ReLU activation function

model.add(keras.layers.Dense(128, input_shape=(784, )))

model.add(keras.layers.BatchNormalization())

model.add(keras.layers.Activation(keras.activations.relu))

#model.add(keras.layers.Dropout(0.2))  # drop 20% of units while training; evaluation automatically uses all units

 

# The output layer uses the softmax activation function

 

model.add(keras.layers.Dense(10))  # only the first layer needs input_shape; later layers infer their input size

model.add(keras.layers.Activation(keras.activations.softmax))

 

# sigmoid is for binary classification; multi-class classification uses softmax
# relu is the activation used between hidden layers

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(lr=0.01),
              metrics=[keras.metrics.categorical_accuracy])

 

model.summary()

model.fit(x, y, epochs=10, batch_size=128, validation_split=0.2)
# a smaller batch size tends to improve accuracy here
# validation_split=0.2 holds out 20% of the training data for validation

 

model.history.history.keys()

import matplotlib.pyplot as plt

 

plt.plot(model.history.history['loss'])

plt.plot(model.history.history['categorical_accuracy'])

plt.legend(["loss","acc"])

 

 

plt.plot(model.history.history['val_loss'])

plt.plot(model.history.history['val_categorical_accuracy'])

 

# Evaluate

model.evaluate(xx,yy)

 

pred = model.predict(xx[1].reshape(-1, 784))

 

import matplotlib.pyplot as plt

plt.imshow(xx[1].reshape(28, 28))

print(np.argmax(pred))

print(np.argmax(yy[1]))


temp 2020. 6. 1. 23:12 by solstyle

둘째날수업.ipynb exercise

 

An MLP with 3 hidden layers (MNIST classifier) - Tensorflow

  • First layer: 256 units
  • Second layer: 256 units
  • Third layer: 256 units
  • Batch size: 128
  • Iterations: 500

 

#import tensorflow as tf

import tensorflow.compat.v1 as tf

tf.compat.v1.disable_eager_execution()

from tensorflow import keras

# Prepare the data

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

 

x_train = x_train.reshape(-1, 28 * 28)

x_test = x_test.reshape(-1, 28 * 28)

 

y_train = keras.utils.to_categorical(y_train)

y_test = keras.utils.to_categorical(y_test)

 

print( y_train.shape)

 

# Build the model

X = tf.placeholder(tf.float32, shape=[None, 784])

Y = tf.placeholder(tf.float32, shape=[None, 10])

 

W1 = tf.Variable(tf.random_normal(shape=[784, 256]))

B1 = tf.Variable(tf.random_normal(shape=[256]))

 

H1 = tf.matmul(X, W1) + B1

H1 = tf.nn.relu(H1)

W2 = tf.Variable(tf.random_normal(shape=[256,256]))

B2 = tf.Variable(tf.random_normal(shape=[256]))

 

H2 = tf.matmul(H1, W2) + B2
H2 = tf.nn.relu(H2)  # activation was missing; without it stacked layers collapse into one linear map

W3 = tf.Variable(tf.random_normal(shape=[256,256]))

B3 = tf.Variable(tf.random_normal(shape=[256]))

 

H3 = tf.matmul(H2, W3) + B3
H3 = tf.nn.relu(H3)  # activation was missing here as well

WW = tf.Variable(tf.random_normal(shape=[256,10]))

BB = tf.Variable(tf.random_normal(shape=[10]))

logit = tf.matmul(H3, WW) + BB

pred = tf.nn.softmax(logit) ##model

 

loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logit, labels=Y)

loss = tf.reduce_mean(loss)

 

optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss) ## train

 

acc = tf.equal(tf.argmax(pred,axis=1), tf.argmax(Y,axis=1))

acc = tf.reduce_mean(tf.cast(acc, tf.float32))

 

#print(tf.argmax(pred,axis=1))

## Train the model - session

 

sess = tf.Session()

sess.run(tf.global_variables_initializer())

 

epochs = 500

batch = 128

n_batch = len(x_train) // batch

 

for e in range(epochs):
    for b in range(n_batch):
        x = x_train[b * batch:(b + 1) * batch]
        y = y_train[b * batch:(b + 1) * batch]
        sess.run(optimizer, feed_dict={X: x, Y: y})

    print(sess.run(acc, feed_dict={X: x_train, Y: y_train}))

 



# Evaluate the model

accuracy = sess.run(acc, feed_dict={X:x_test, Y:y_test})

print("{0: .2f}%".format(accuracy * 100))

Deep Learning Day4.ipynb exercise

 

An MLP with 3 hidden layers (MNIST classifier) - keras

  • First layer: 256 units
  • Second layer: 256 units
  • Third layer: 256 units
  • Batch size: 128
  • Iterations: 500

import tensorflow.compat.v1 as tf

tf.compat.v1.disable_eager_execution()

 

from tensorflow import keras

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

 

x_train = x_train.reshape(-1, 28*28)/255

x_test = x_test.reshape(-1, 28*28)/255

 

y_train = keras.utils.to_categorical(y_train)

y_test = keras.utils.to_categorical(y_test)

 

# Build the model

model = keras.models.Sequential()

 

model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))

# Train

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(learning_rate=0.01),
              metrics=[keras.metrics.categorical_accuracy])

model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=1)

model.summary()

acc = model.evaluate(x_test, y_test)

print(acc)

An MLP with 3 hidden layers (CIFAR10 classifier) - keras

  • First layer: 256 units
  • Second layer: 256 units
  • Third layer: 256 units
  • Batch size: 128
  • Iterations: 500

 

(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()

print(x_train.shape)

x_train = x_train.reshape(-1, 32 * 32 * 3) / 255

x_test = x_test.reshape(-1, 32 * 32 * 3) / 255

 

y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)

model = keras.models.Sequential()
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))

 

model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(learning_rate=0.01),
              metrics=[keras.metrics.categorical_accuracy])

model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=1)

 

acc = model.evaluate(x_test, y_test)

model.summary()

print(acc)

 

 

A CNN with 7 layers (MNIST classifier) - keras

#import tensorflow as tf

import tensorflow.compat.v1 as tf

tf.compat.v1.disable_eager_execution()

 

from tensorflow import keras

 

# Prepare the data

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

 

print(x_train.shape)

x_train = x_train.reshape(-1, 28, 28, 1) / 255

x_test = x_test.reshape(-1, 28, 28, 1) / 255

 

y_train = keras.utils.to_categorical(y_train)

y_test = keras.utils.to_categorical(y_test)




# Build the model

model = keras.models.Sequential()

 

model.add(keras.layers.Conv2D(16, (3, 3), strides=(1, 1), activation="relu", padding="same", input_shape=(28, 28, 1)))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
#model.add(keras.layers.Activation(keras.activations.relu))

model.add(keras.layers.Conv2D(32, (3, 3), strides=(1, 1), padding="same", activation="relu"))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))

 

model.add(keras.layers.Flatten())

model.add(keras.layers.Dense(500))  # no activation specified, so this layer is linear

model.add(keras.layers.Dense(10, activation='softmax'))

#model.add(keras.layers.Activation(keras.activations.softmax))



# Compile the model

model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.01),
              loss=keras.losses.categorical_crossentropy,
              metrics=[keras.metrics.categorical_accuracy])

 

 

model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=1)

 

acc = model.evaluate(x_test, y_test)

model.summary()

 

print(acc)

 



 
