U-Net Practice Tutorial: Python Cheat Code

In [18]:
import glob
import shutil
import os
import random
from PIL import Image

import seaborn as sns

import pandas as pd
import numpy as np

from sklearn.model_selection import train_test_split,StratifiedKFold
In [104]:
from tensorflow.keras.layers import Dense, Input, Activation, Flatten
from tensorflow.keras.layers import BatchNormalization,Add,Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import LeakyReLU, ReLU, Conv2D, MaxPooling2D, BatchNormalization, Conv2DTranspose, UpSampling2D
from tensorflow.keras import callbacks
from tensorflow.keras import backend as K
In [6]:
mask_filenames = glob.glob('../input/imagedata128/masks/*')
mask_df = pd.DataFrame()
mask_df['filename'] = mask_filenames
mask_df['mask_percentage'] = 0
mask_df['labels'] = 0
mask_df.set_index('filename', inplace=True)

for file in mask_filenames:
    mask_df.loc[file, 'mask_percentage'] = np.array(Image.open(file)).sum()/(128*128*255)

mask_df.loc[mask_df.mask_percentage > 0, 'labels'] = 1
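Before stratifying the split below, it can help to look at the class balance; a quick sketch (the exact counts depend on your copy of the dataset):

In [ ]:
# Quick check of how many masks contain a lesion (label 1) versus none (label 0)
print(mask_df['labels'].value_counts())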
In [11]:
train_valid_filenames = glob.glob('../input/imagedata128/train/*')

# Align the stratification labels with the train file order (mask_df is indexed by the mask file paths)
train_valid_labels = mask_df.loc[[f.replace('/train', '/masks') for f in train_valid_filenames], 'labels']
train_filenames, valid_filenames = train_test_split(train_valid_filenames, stratify = train_valid_labels, test_size = 0.1, random_state = 10)
mask_train_filenames = [f.replace('/train', '/masks') for f in train_filenames]
mask_valid_filenames = [f.replace('/train', '/masks') for f in valid_filenames]
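Because the mask paths are derived by string substitution, a quick existence check can catch a mismatched directory layout (a minimal sketch using the os import above):

In [ ]:
# Verify that every derived mask path points to an existing file
missing = [f for f in mask_train_filenames + mask_valid_filenames if not os.path.exists(f)]
print(len(missing), 'missing mask files')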
In [13]:
train_x = np.zeros((1000,128,128))
valid_x = np.zeros((100,128,128))
train_mask_y = np.zeros((1000,128,128))
valid_mask_y = np.zeros((100,128,128))
train_y = np.zeros((1000))
valid_y = np.zeros((100))

# 1,000 training images
for (index, image) in enumerate(train_filenames[:1000]):
    train_x[index] = np.array(Image.open(image))
    
# 100 validation images
for (index, image) in enumerate(valid_filenames[:100]):
    valid_x[index] = np.array(Image.open(image))
    
# 1,000 training ground-truth masks and lesion labels
for (index, image) in enumerate(mask_train_filenames[:1000]):
    train_mask_y[index] = np.array(Image.open(image))
    train_y[index] = mask_df.loc[image, 'labels']
    
# 100 validation ground-truth masks and lesion labels
for (index, image) in enumerate(mask_valid_filenames[:100]):
    valid_mask_y[index] = np.array(Image.open(image))
    valid_y[index] = mask_df.loc[image, 'labels']
    
In [14]:
train_x = train_x.reshape(1000,128,128,1)
valid_x = valid_x.reshape(100,128,128,1)

train_mask_y = train_mask_y.reshape(1000,128,128,1)
valid_mask_y = valid_mask_y.reshape(100,128,128,1)
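The arrays keep their raw 0-255 pixel values; if you prefer inputs in the 0-1 range, an optional (commented-out) sketch:

In [ ]:
# Optional: scale pixel values to [0, 1]; the cells below train on the raw 0-255 arrays
# train_x = train_x / 255.0
# valid_x = valid_x / 255.0
# train_mask_y = train_mask_y / 255.0
# valid_mask_y = valid_mask_y / 255.0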
In [ ]:
def contract_block(x, in_shape, out_shape):
    # Helper sketch (not used by the model below): downsample with a strided conv and widen the channels to out_shape
    x = Conv2D(filters = out_shape, kernel_size=(2,2), strides=(2,2), activation = None, padding="same")(x)
    x = ReLU()(x)
    x = BatchNormalization()(x)
    return x
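A full U-Net decoder also concatenates the matching encoder feature map before convolving. The model below skips this, but a counterpart block could look like the following sketch (expand_block, skip, and the Concatenate import are not part of the original notebook):

In [ ]:
from tensorflow.keras.layers import Concatenate

def expand_block(x, skip, out_shape):
    # Hypothetical decoder step: upsample, concatenate the encoder skip connection, then convolve
    x = Conv2DTranspose(filters = out_shape, kernel_size=(2,2), strides=(2,2), padding="same")(x)
    x = Concatenate()([x, skip])
    x = Conv2D(filters = out_shape, kernel_size=3, strides=1, activation = None, padding="same")(x)
    x = ReLU()(x)
    x = BatchNormalization()(x)
    return x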
In [105]:
def UNet(input_shape):
    
    # Just checking whether a very simple type of deep-learning model runs end to end.
    inp = Input(shape = input_shape)
    
    # Contracting block 1
    x = Conv2D(filters = 64,kernel_size=3, strides =1, activation = None, padding="same")(inp)
    x = ReLU()(x)
    x = BatchNormalization()(x)
    
    x = Conv2D(filters = 64,kernel_size=3, strides =1, activation = None, padding="same")(x)
    x = ReLU()(x)
    x = BatchNormalization()(x)
    
    x = MaxPooling2D(2)(x)
    x = Conv2DTranspose(filters = 128, kernel_size = 1)(x)
    
    # Contracting block 2
    x = Conv2D(filters = 128,kernel_size=3, strides =1, activation = None, padding="same")(x)
    x = ReLU()(x)
    x = BatchNormalization()(x)
    
    x = Conv2D(filters = 128,kernel_size=3, strides =1, activation = None, padding="same")(x)
    x = ReLU()(x)
    x = BatchNormalization()(x)
    
    x = MaxPooling2D(2)(x)
    x = Conv2DTranspose(filters = 256, kernel_size = 1)(x)    
    
    # Bottleneck
    x = Conv2D(filters = 256,kernel_size=3, strides =1, activation = None, padding="same")(x)
    x = ReLU()(x)
    x = BatchNormalization()(x)
    
    # Expanding block 1
    x = Conv2D(filters = 128,kernel_size=3, strides =1, activation = None, padding="same")(x)
    x = ReLU()(x)
    x = BatchNormalization()(x)
    
    x = UpSampling2D()(x)
    x = ReLU()(x)
    x = BatchNormalization()(x)    
    
    x = Flatten()(x)
    
    x = Dense(32, activation = 'relu')(x)
    x = Dense(16, activation = 'relu')(x)
    out = Dense(1, activation = 'linear')(x)
    
    model = Model(inputs = inp, outputs=[out])
    
    return model
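The head above flattens to a single linear output, so the model only predicts the lesion label. For pixel-wise segmentation against train_mask_y, a possible variant of the head (a sketch, not the model trained below) would keep the spatial dimensions:

In [ ]:
# Sketch of a segmentation-style head (would replace the Flatten/Dense block above; not used below):
#     x = UpSampling2D()(x)                                    # 64x64 -> 128x128
#     out = Conv2D(1, kernel_size=1, activation='sigmoid')(x)  # one mask channel per pixel
# and compile with a pixel-wise loss such as 'binary_crossentropy', fitting against train_mask_y / 255.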
In [106]:
model = UNet(input_shape = (128, 128, 1))
model.compile(loss='mae', optimizer='adam')
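The callbacks module imported earlier is not used in the original run; if you want early stopping on the validation loss, a minimal sketch:

In [ ]:
# Optional: stop training when val_loss stops improving (not used in the run below)
early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
# pass callbacks=[early_stop] to model.fit in the next cell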
In [107]:
model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_27 (InputLayer)        (None, 128, 128, 1)       0         
_________________________________________________________________
conv2d_57 (Conv2D)           (None, 128, 128, 64)      640       
_________________________________________________________________
re_lu_57 (ReLU)              (None, 128, 128, 64)      0         
_________________________________________________________________
batch_normalization_v1_48 (B (None, 128, 128, 64)      256       
_________________________________________________________________
conv2d_58 (Conv2D)           (None, 128, 128, 64)      36928     
_________________________________________________________________
re_lu_58 (ReLU)              (None, 128, 128, 64)      0         
_________________________________________________________________
batch_normalization_v1_49 (B (None, 128, 128, 64)      256       
_________________________________________________________________
max_pooling2d_30 (MaxPooling (None, 64, 64, 64)        0         
_________________________________________________________________
conv2d_transpose_15 (Conv2DT (None, 64, 64, 128)       8320      
_________________________________________________________________
conv2d_59 (Conv2D)           (None, 64, 64, 128)       147584    
_________________________________________________________________
re_lu_59 (ReLU)              (None, 64, 64, 128)       0         
_________________________________________________________________
batch_normalization_v1_50 (B (None, 64, 64, 128)       512       
_________________________________________________________________
conv2d_60 (Conv2D)           (None, 64, 64, 128)       147584    
_________________________________________________________________
re_lu_60 (ReLU)              (None, 64, 64, 128)       0         
_________________________________________________________________
batch_normalization_v1_51 (B (None, 64, 64, 128)       512       
_________________________________________________________________
max_pooling2d_31 (MaxPooling (None, 32, 32, 128)       0         
_________________________________________________________________
conv2d_transpose_16 (Conv2DT (None, 32, 32, 256)       33024     
_________________________________________________________________
conv2d_61 (Conv2D)           (None, 32, 32, 256)       590080    
_________________________________________________________________
re_lu_61 (ReLU)              (None, 32, 32, 256)       0         
_________________________________________________________________
batch_normalization_v1_52 (B (None, 32, 32, 256)       1024      
_________________________________________________________________
conv2d_62 (Conv2D)           (None, 32, 32, 128)       295040    
_________________________________________________________________
re_lu_62 (ReLU)              (None, 32, 32, 128)       0         
_________________________________________________________________
batch_normalization_v1_53 (B (None, 32, 32, 128)       512       
_________________________________________________________________
up_sampling2d (UpSampling2D) (None, 64, 64, 128)       0         
_________________________________________________________________
re_lu_63 (ReLU)              (None, 64, 64, 128)       0         
_________________________________________________________________
batch_normalization_v1_54 (B (None, 64, 64, 128)       512       
_________________________________________________________________
flatten_23 (Flatten)         (None, 524288)            0         
_________________________________________________________________
dense_69 (Dense)             (None, 32)                16777248  
_________________________________________________________________
dense_70 (Dense)             (None, 16)                528       
_________________________________________________________________
dense_71 (Dense)             (None, 1)                 17        
=================================================================
Total params: 18,040,577
Trainable params: 18,038,785
Non-trainable params: 1,792
_________________________________________________________________
In [79]:
history = model.fit(train_x, [train_y], validation_data=(valid_x, [valid_y]),epochs = 5, batch_size = 10, verbose = 1)
Train on 1000 samples, validate on 100 samples
Epoch 1/5
1000/1000 [==============================] - 97s 97ms/sample - loss: 1.1645 - val_loss: 3.3693
Epoch 2/5
1000/1000 [==============================] - 102s 102ms/sample - loss: 0.4473 - val_loss: 0.3532
Epoch 3/5
1000/1000 [==============================] - 96s 96ms/sample - loss: 0.3900 - val_loss: 0.3493
Epoch 4/5
1000/1000 [==============================] - 262s 262ms/sample - loss: 0.2454 - val_loss: 0.3401
Epoch 5/5
1000/1000 [==============================] - 96s 96ms/sample - loss: 0.2300 - val_loss: 0.3420
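The loss curves can be plotted from the returned History object (a small sketch; matplotlib is assumed to be installed alongside the imports above):

In [ ]:
import matplotlib.pyplot as plt

# Plot training vs. validation loss recorded by model.fit
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='valid loss')
plt.xlabel('epoch')
plt.ylabel('MAE loss')
plt.legend()
plt.show()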