둘째날수업.ipynb hands-on practice
MLP with three hidden layers (MNIST classifier) - TensorFlow
First hidden layer: 256 units
Second hidden layer: 256 units
Third hidden layer: 256 units
Batch size: 128
Epochs: 500
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
from tensorflow import keras
# Prepare the data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 28*28) / 255  # flatten each 28x28 image; /255 scales pixels to [0, 1] for stable training
x_test = x_test.reshape(-1, 28*28) / 255
y_train = keras.utils.to_categorical(y_train)  # one-hot encode the labels
y_test = keras.utils.to_categorical(y_test)
print(y_train.shape)
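# expected output: (60000, 10) - 60000 labels, one-hot encoded over 10 digit classes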
# Build the model
X = tf.placeholder(tf.float32, shape=[None, 784])  # input images
Y = tf.placeholder(tf.float32, shape=[None, 10])   # one-hot labels
W1 = tf.Variable(tf.random_normal(shape=[784, 256]))
B1 = tf.Variable(tf.random_normal(shape=[256]))
H1 = tf.nn.relu(tf.matmul(X, W1) + B1)
W2 = tf.Variable(tf.random_normal(shape=[256, 256]))
B2 = tf.Variable(tf.random_normal(shape=[256]))
H2 = tf.nn.relu(tf.matmul(H1, W2) + B2)  # ReLU keeps this layer nonlinear; without it the stack collapses to a single linear map
W3 = tf.Variable(tf.random_normal(shape=[256, 256]))
B3 = tf.Variable(tf.random_normal(shape=[256]))
H3 = tf.nn.relu(tf.matmul(H2, W3) + B3)
WW = tf.Variable(tf.random_normal(shape=[256, 10]))
BB = tf.Variable(tf.random_normal(shape=[10]))
logit = tf.matmul(H3, WW) + BB
pred = tf.nn.softmax(logit)  # model output: class probabilities
loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logit, labels=Y)
loss = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)  # training op
acc = tf.equal(tf.argmax(pred, axis=1), tf.argmax(Y, axis=1))
acc = tf.reduce_mean(tf.cast(acc, tf.float32))  # fraction of correct predictions
## Train the model - session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
epochs = 500
batch = 128
n_batch = len(x_train) // batch  # number of mini-batches per epoch
for e in range(epochs):
    for b in range(n_batch):
        x = x_train[b*batch : (b+1)*batch]
        y = y_train[b*batch : (b+1)*batch]
        sess.run(optimizer, feed_dict={X: x, Y: y})
    print(sess.run(acc, feed_dict={X: x_train, Y: y_train}))  # training accuracy after each epoch
# Evaluate the model
accuracy = sess.run(acc, feed_dict={X: x_test, Y: y_test})
print("{0:.2f}%".format(accuracy * 100))
Deep Learning Day4.ipynb hands-on practice
MLP with three hidden layers (MNIST classifier) - Keras
First hidden layer: 256 units
Second hidden layer: 256 units
Third hidden layer: 256 units
Batch size: 128
Epochs: 500
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
from tensorflow import keras
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 28*28)/255
x_test = x_test.reshape(-1, 28*28)/255
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
# Build the model
model = keras.models.Sequential()
model.add(keras.layers.Dense(256, activation='relu', input_shape=(784,)))  # declaring input_shape builds the model immediately
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))  # output layer: one probability per digit
# Train
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(learning_rate=0.01),
              metrics=[keras.metrics.categorical_accuracy])
model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=1)  # note: 10 epochs here, though the spec above says 500
model.summary()
acc = model.evaluate(x_test, y_test)  # returns [loss, accuracy]
print(acc)
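To classify individual images, model.predict returns the softmax probabilities per class. A minimal sketch, assuming the `model` fitted above:

import numpy as np
probs = model.predict(x_test[:5])     # shape (5, 10): one probability row per image
print(np.argmax(probs, axis=1))       # predicted digit for each image
print(np.argmax(y_test[:5], axis=1))  # ground-truth digits for comparison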
MLP with three hidden layers (CIFAR10 classifier) - Keras
First hidden layer: 256 units
Second hidden layer: 256 units
Third hidden layer: 256 units
Batch size: 128
Epochs: 500
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
print(x_train.shape)
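# expected output: (50000, 32, 32, 3) - 50000 RGB images of 32x32 pixels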
x_train = x_train.reshape(-1, 32*32*3) / 255  # flatten each 32x32x3 image and scale to [0, 1]
x_test = x_test.reshape(-1, 32*32*3) / 255
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
model = keras.models.Sequential()
model.add(keras.layers.Dense(256, activation='relu', input_shape=(32*32*3,)))
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adam(learning_rate=0.01),
              metrics=[keras.metrics.categorical_accuracy])
model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=1)
acc = model.evaluate(x_test, y_test)  # returns [loss, accuracy]
model.summary()
print(acc)
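Predicted indices can be mapped back to category names; the list below follows the standard CIFAR-10 label order. A minimal sketch, assuming the `model` fitted above:

import numpy as np
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer',
          'dog', 'frog', 'horse', 'ship', 'truck']  # standard CIFAR-10 class order
for i, p in enumerate(np.argmax(model.predict(x_test[:3]), axis=1)):
    print(f"image {i}: predicted {labels[p]}")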
CNN with 7 layers (MNIST classifier) - Keras
# import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
from tensorflow import keras
# Prepare the data
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
print(x_train.shape)
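# expected output: (60000, 28, 28) - 60000 grayscale images of 28x28 pixels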
x_train = x_train.reshape(-1, 28, 28, 1) / 255  # add a channel axis and scale to [0, 1]
x_test = x_test.reshape(-1, 28, 28, 1) / 255
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
# Build the model
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(16, (3, 3), strides=(1, 1), activation="relu", padding="same", input_shape=(28, 28, 1)))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))  # 28x28 -> 14x14
# model.add(keras.layers.Activation(keras.activations.relu))  # alternative to activation="relu" above
model.add(keras.layers.Conv2D(32, (3, 3), strides=(1, 1), padding="same", activation="relu"))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))  # 14x14 -> 7x7
model.add(keras.layers.Flatten())  # 7x7x32 -> 1568-dim vector
model.add(keras.layers.Dense(500, activation='relu'))  # ReLU keeps this layer nonlinear; the original had no activation here
model.add(keras.layers.Dense(10, activation='softmax'))
# model.add(keras.layers.Activation(keras.activations.softmax))  # alternative to activation='softmax' above
# Compile the model
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.01),
              loss=keras.losses.categorical_crossentropy,
              metrics=[keras.metrics.categorical_accuracy])
model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=1)
acc = model.evaluate(x_test, y_test)  # returns [loss, accuracy]
model.summary()
print(acc)
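The trained CNN can be saved and reloaded with the standard Keras save/load API. A minimal sketch; the filename mnist_cnn.h5 is only an example:

model.save('mnist_cnn.h5')                          # serialize architecture + weights
restored = keras.models.load_model('mnist_cnn.h5')  # rebuild the model from the file
print(restored.evaluate(x_test, y_test))            # should reproduce the accuracy above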