⚙️ Inception Net
📌 Load a model pre-trained on ImageNet
# Load InceptionV3 pre-trained on ImageNet without its classification head
# (include_top=False), so a custom head can be attached for transfer learning.
base_model = InceptionV3(weights='imagenet', include_top=False)
📌 Retrain on top of the pre-trained model's layers (transfer learning)
# Transfer learning: freeze the entire pretrained backbone so that only the
# new classification head defined below is trained.
for layer in base_model.layers:
    layer.trainable = False

# One output unit per class sub-directory found in the training folder.
num_classes = len(os.listdir(train_data_sub_folder))

# Classification head: global average pooling, then three dropout-regularised
# fully-connected blocks (1024 -> 1024 -> 512), finishing in a softmax.
x = GlobalAveragePooling2D()(base_model.output)
for units in (1024, 1024, 512):
    x = Dropout(0.2)(x)
    x = Dense(units, activation="relu")(x)
preds = Dense(num_classes, activation="softmax")(x)

model = Model(inputs=base_model.input, outputs=preds)

# Adam with explicit (default) beta values; categorical cross-entropy matches
# the softmax output and one-hot labels produced by the generators.
adam = optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999)
model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["accuracy"])
# Input pipelines: heavy augmentation for training, rescale-only for
# validation.  Both reserve the same 10% validation split from the training
# directory.
train_datagen = ImageDataGenerator(
    rescale=1. / 255.,
    validation_split=0.1,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    fill_mode='nearest',
)
valid_datagen = ImageDataGenerator(rescale=1. / 255., validation_split=0.1)

image_size = 299   # InceptionV3's native input resolution
batch_size = 1024
# Stream shuffled, augmented training batches (the 90% "training" split) from
# the per-class sub-folders, with one-hot (categorical) labels.
train_generator = train_datagen.flow_from_directory(
    train_data_sub_folder,
    subset="training",
    target_size=(image_size, image_size),
    batch_size=batch_size,
    class_mode="categorical",
    shuffle=True,
    seed=42,
)
# Validation batches drawn from the held-out 10% split (unshuffled so that
# predictions stay aligned with filenames).
# BUG FIX: the original passed subset="training" here, which made the
# "validation" generator iterate the SAME 90% of images as the training
# generator (data leakage — val metrics measured training data).
# subset="validation" selects the held-out split instead.
valid_generator = valid_datagen.flow_from_directory(
    train_data_sub_folder,
    subset="validation",
    target_size=(image_size, image_size),
    seed=42, shuffle=False,
    batch_size=batch_size,
    class_mode="categorical"
)
import os

# Checkpointing: persist the weights after every epoch so training can be
# resumed or a specific epoch's weights recovered later.
cp_path = "training/cp-{epoch:04d}.ckpt"
cp_dir = os.path.dirname(cp_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    cp_path,
    save_weights_only=True,
    verbose=1,
)

# Early stopping: halt once validation loss has failed to improve for
# 3 consecutive epochs.
es_callback = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=3)
num_epochs = 50
# Ceiling division (no math import needed) instead of int(n / batch_size):
# flooring silently dropped the final partial batch of samples every epoch.
my_steps_per_epoch = -(-train_generator.n // batch_size)
# BUG FIX: Model.fit_generator is deprecated and removed in recent TensorFlow
# releases; Model.fit accepts generators directly with the same arguments.
history = model.fit(
    train_generator,
    epochs=num_epochs,
    validation_data=valid_generator,
    steps_per_epoch=my_steps_per_epoch,
    use_multiprocessing=True,
    callbacks=[cp_callback, es_callback],
)
📌 Predict with the trained model
# Build the test batch: load every image under /content/test, remember its
# filename stem as the submission id, and resize to the network's input size.
test_set = []
test_set_ids = []
for curImage in os.listdir('/content/test'):
    test_set_ids.append(os.path.splitext(curImage)[0])
    img = cv2.imread('/content/test/' + curImage)
    # BUG FIX: cv2.imread returns BGR, but the training images were loaded as
    # RGB by ImageDataGenerator — convert so inference inputs match training.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    test_set.append(cv2.resize(img, (image_size, image_size)))

# Apply the same 1/255 rescaling the generators used during training.
test_set = np.array(test_set, np.float32) / 255.0
predictions = model.predict(test_set)

# Invert class_indices to map column index -> breed name, in generator order.
classes = {index: breed for breed, index in train_generator.class_indices.items()}
# BUG FIX: use the actual number of classes instead of the hard-coded 120,
# so the script works for any class count.
column_names = [classes[i] for i in range(len(classes))]

predictions_df = pd.DataFrame(predictions)
predictions_df.columns = column_names
predictions_df.insert(0, 'id', test_set_ids)
predictions_df.set_index('id', inplace=True)
predictions_df.to_csv('final_submission.csv', sep=",")