```python
import os
import datetime

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Reshape, LeakyReLU, BatchNormalization
from tensorflow.keras.optimizers import Adam
# Hyperparameters
learning_rate = 0.0002
beta_1 = 0.5
epochs = 30000
batch_size = 64
save_interval = 300
# Load and preprocess the data
def load_data():
    (x_train, _), (_, _) = tf.keras.datasets.mnist.load_data()
    x_train = x_train.astype('float32') / 127.5 - 1.0  # normalize to [-1, 1] to match the tanh output
    x_train = np.expand_dims(x_train, axis=-1)         # add channel dimension: (60000, 28, 28, 1)
    return x_train
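
# Sanity check (a minimal illustrative check, safe to remove): the shape and
# value range below follow directly from the preprocessing above.
_sample = load_data()
assert _sample.shape[1:] == (28, 28, 1)
assert _sample.min() >= -1.0 and _sample.max() <= 1.0
del _sample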

# Generator model
def build_generator():
    model = Sequential()
    model.add(Dense(256, input_dim=100))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(28 * 28 * 1, activation='tanh'))  # tanh matches the [-1, 1] data range
    model.add(Reshape((28, 28, 1)))
    return model
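
# Illustrative check (safe to remove): a 100-dim noise vector comes out of
# the generator as a single 28x28x1 image.
assert build_generator().output_shape == (None, 28, 28, 1)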

# Discriminator model
def build_discriminator():
    model = Sequential()
    model.add(Flatten(input_shape=(28, 28, 1)))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))  # probability that the input is real
    return model

# Combined GAN model: generator followed by discriminator
def build_gan(generator, discriminator):
    model = Sequential()
    model.add(generator)
    model.add(discriminator)
    return model

# Compile the models
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate, beta_1), metrics=['accuracy'])

generator = build_generator()
discriminator.trainable = False  # freeze the discriminator inside the combined model
gan = build_gan(generator, discriminator)
gan.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate, beta_1))
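
# Note: `trainable` is captured at compile time. The standalone
# `discriminator` was compiled before being frozen, so its train_on_batch
# still updates its weights; `gan` was compiled after, so gan.train_on_batch
# updates only the generator. This is the standard Keras GAN pattern.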

# Custom TensorBoard callback
class CustomTensorBoardCallback(tf.keras.callbacks.Callback):
    def __init__(self, log_dir):
        super(CustomTensorBoardCallback, self).__init__()
        self.log_dir = log_dir
        self.writer = tf.summary.create_file_writer(log_dir)

    def write_log(self, name, loss, batch_no):
        # Write a scalar summary that TensorBoard picks up from log_dir
        with self.writer.as_default():
            tf.summary.scalar(name, loss, step=batch_no)
            self.writer.flush()
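
# Usage sketch (illustrative; the log_dir name here is an assumption):
# cb = CustomTensorBoardCallback(log_dir="logs/smoke_test")
# cb.write_log("test_scalar", 0.5, batch_no=0)
# The scalar then appears under the "Scalars" tab in TensorBoard.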

# Training loop
def train(epochs, batch_size=128, save_interval=50):
    x_train = load_data()
    half_batch = int(batch_size / 2)  # the discriminator sees half real, half fake images
    dis_losses = []
    gen_losses = []

    for epoch in range(epochs):
        # Sample a half-batch of real images
        idx = np.random.randint(0, x_train.shape[0], half_batch)
        real_imgs = x_train[idx]

        # Generate a half-batch of fake images
        noise = np.random.normal(0, 1, (half_batch, 100))
        gen_imgs = generator.predict(noise, verbose=0)

        # Labels: 1 for real, 0 for fake
        real_y = np.ones((half_batch, 1))
        fake_y = np.zeros((half_batch, 1))

        # Train the discriminator
        d_loss_real = discriminator.train_on_batch(real_imgs, real_y)
        d_loss_fake = discriminator.train_on_batch(gen_imgs, fake_y)
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        # Train the generator: label fakes as "real" so the gradients push
        # the generator toward fooling the (frozen) discriminator
        noise = np.random.normal(0, 1, (batch_size, 100))
        valid_y = np.ones((batch_size, 1))
        g_loss = gan.train_on_batch(noise, valid_y)

        # Record and log progress
        dis_losses.append(d_loss[0])
        gen_losses.append(g_loss)
        print(f"{epoch + 1} [D loss: {d_loss[0]}, acc.: {100 * d_loss[1]}%] [G loss: {g_loss}]")
        custom_tensorboard_callback.write_log('discriminator_loss', np.mean(dis_losses), epoch)
        custom_tensorboard_callback.write_log('generator_loss', np.mean(gen_losses), epoch)

        # Periodically save sample images
        if (epoch + 1) % save_interval == 0:
            save_imgs(epoch + 1)
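
# Note: at a rough equilibrium the discriminator accuracy printed above tends
# to hover around 50%; accuracy pinned near 100% usually means the generator
# has fallen behind (or collapsed).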

# Save a 5x5 grid of generated sample images
def save_imgs(epoch):
    r, c = 5, 5
    noise = np.random.normal(0, 1, (r * c, 100))
    gen_imgs = generator.predict(noise, verbose=0)
    gen_imgs = 0.5 * gen_imgs + 0.5  # rescale from [-1, 1] back to [0, 1]

    fig, axs = plt.subplots(r, c)
    count = 0
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(gen_imgs[count, :, :, 0], cmap='gray')
            axs[i, j].axis('off')
            count += 1
    os.makedirs('images', exist_ok=True)  # make sure the output directory exists
    plt.savefig('images/image_at_epoch_{:04d}.png'.format(epoch))
    plt.close(fig)  # free the figure so memory does not grow over thousands of epochs

# Define log directory
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

# Instantiate the custom callback
custom_tensorboard_callback = CustomTensorBoardCallback(log_dir=log_dir)

# Train the model
train(epochs=epochs, batch_size=batch_size, save_interval=save_interval)
```
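Once training finishes, you will probably want to reuse the generator without retraining. A minimal sketch (the filename is my own choice, not part of the original script):

```python
# Save the trained generator and sample a single image from it later
# (filename is an illustrative assumption).
generator.save('mnist_generator.h5')

sampled = generator.predict(np.random.normal(0, 1, (1, 100)), verbose=0)
plt.imshow(0.5 * sampled[0, :, :, 0] + 0.5, cmap='gray')
plt.show()
```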
Results

After training the model, check its performance in TensorBoard:

```bash
tensorboard --logdir=logs/fit
```
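
If you work in a Jupyter or Colab notebook, the same dashboard can be displayed inline via the TensorBoard notebook extension (a usage sketch, assuming the extension that ships with recent TensorFlow installs):

```python
%load_ext tensorboard
%tensorboard --logdir logs/fit
```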