
TensorFlow 2 API Modeling

by 자유로운시간 2022. 5. 25.

In TensorFlow, there are broadly three ways to stack deep-learning layers, that is, to build a model:

 

1. Sequential API

2. Functional API

3. Subclassing

 

Using two datasets, MNIST and CIFAR-100, the code below builds a model with each of the three approaches above.
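Before the full examples, here is a compact sketch (my own illustration, not one of the examples below; TinyModel is just an illustrative name) of the same single-layer classifier written in each of the three styles, to show how the syntax differs:

from tensorflow import keras

# 1. Sequential: pass a plain list of layers
seq = keras.Sequential([keras.layers.Dense(10, activation='softmax')])

# 2. Functional: create an Input tensor and call layers on it
inp = keras.Input(shape=(784,))
out = keras.layers.Dense(10, activation='softmax')(inp)
func = keras.Model(inputs=inp, outputs=out)

# 3. Subclassing: define layers in __init__ and wire them up in call()
class TinyModel(keras.Model):
    def __init__(self):
        super().__init__()
        self.fc = keras.layers.Dense(10, activation='softmax')

    def call(self, x):
        return self.fc(x)

sub = TinyModel()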


Sequential API

 

 

MNIST dataset

import tensorflow as tf
from tensorflow import keras
import numpy as np


# Data preparation
mnist = keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print(x_train.shape)
# Add a channel axis
x_train=x_train[...,np.newaxis]
print(x_train.shape)
x_test=x_test[...,np.newaxis]

print(len(x_train), len(x_test))


# Sequential Model
model = keras.Sequential([
    keras.layers.Conv2D(32, 3, activation = 'relu'),
    keras.layers.Conv2D(64, 3, activation = 'relu'),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation = 'relu'),
    keras.layers.Dense(10, activation = 'softmax')
])


model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=1)

model.evaluate(x_test,  y_test, verbose=2)
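The Sequential model above is defined without an input shape, so Keras only builds its weights the first time it sees data (here, inside fit()). As a small sketch, assuming you want model.summary() before training, the same stack can be given an explicit input_shape on the first layer:

from tensorflow import keras

# Same layers as above, but built immediately thanks to input_shape,
# so summary() works before any data is passed in.
model = keras.Sequential([
    keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
    keras.layers.Conv2D(64, 3, activation='relu'),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(10, activation='softmax')
])

model.summary()   # prints per-layer output shapes and parameter counts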

 

CIFAR-100 dataset

import tensorflow as tf
from tensorflow import keras

# Data preparation
cifar100 = keras.datasets.cifar100

(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print(len(x_train), len(x_test))


# Build the Sequential model
model = keras.Sequential([
    keras.layers.Conv2D(16, 3, activation = 'relu'),
    keras.layers.MaxPool2D((2,2)),
    keras.layers.Conv2D(32, 3, activation = 'relu'),
    keras.layers.MaxPool2D((2,2)),
    keras.layers.Flatten(),
    keras.layers.Dense(256, activation = 'relu'),
    keras.layers.Dense(100, activation = 'softmax')   # 100 output classes for CIFAR-100
])


model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=1)

model.evaluate(x_test,  y_test, verbose=2)
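Since the output layer has to match the number of classes (100 for CIFAR-100), a quick sanity check of the labels can catch a wrong Dense size early. A minimal check, assuming y_train is the label array loaded above:

import numpy as np

# CIFAR-100 labels are integers 0..99 with shape (50000, 1),
# so the final Dense layer needs exactly 100 units.
print(y_train.shape, y_train.min(), y_train.max())
print(len(np.unique(y_train)))   # expected: 100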

%reset

 

Functional API

 

MNIST dataset

import tensorflow as tf
from tensorflow import keras
import numpy as np


mnist = keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

x_train=x_train[...,np.newaxis]
x_test=x_test[...,np.newaxis]

print(len(x_train), len(x_test))




# Functional API model
inputs = keras.Input(shape = (28, 28, 1))

x = keras.layers.Conv2D(32, 3, activation = 'relu')(inputs)
x = keras.layers.Conv2D(64, 3, activation = 'relu')(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(128, activation = 'relu')(x)
predictions = keras.layers.Dense(10, activation = 'softmax')(x)

model = keras.Model(inputs = inputs, outputs = predictions)


model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=1)

model.evaluate(x_test,  y_test, verbose=2)
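One thing the Functional API makes straightforward is reusing part of an existing graph. As a sketch (not part of the original example), a second model that outputs the 128-dimensional features from the Dense layer before the softmax can be built from the same graph:

# New model sharing the layers above; model.layers[-2] is the Dense(128)
# layer, so this model returns its activations instead of class scores.
feature_extractor = keras.Model(inputs=inputs, outputs=model.layers[-2].output)

features = feature_extractor(x_test[:5])
print(features.shape)   # (5, 128)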

 

CIFAR-100 dataset

import tensorflow as tf
from tensorflow import keras

cifar100 = keras.datasets.cifar100

(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print(len(x_train), len(x_test))


# Functional API model
inputs = keras.Input(shape = (32, 32, 3))

x = keras.layers.Conv2D(16, 3, activation = 'relu')(inputs)
x = keras.layers.MaxPool2D((2,2))(x)
x = keras.layers.Conv2D(32, 3, activation = 'relu')(x)
x = keras.layers.MaxPool2D((2,2))(x)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(256, activation = 'relu')(x)
predictions = keras.layers.Dense(100, activation = 'softmax')(x)

model = keras.Model(inputs = inputs, outputs = predictions)


model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=1)

model.evaluate(x_test,  y_test, verbose=2)
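As a side note (a sketch, not part of the original code): because a Functional model is a static graph of layers, the whole thing, architecture plus weights, can be inspected and saved in one go. The file name below is just an example:

model.summary()                           # layer-by-layer output shapes
model.save('cifar100_functional.h5')      # example path; HDF5 works for Functional models
restored = keras.models.load_model('cifar100_functional.h5')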

%reset

 

 


 

Subclassing

 

MNIST dataset

import tensorflow as tf
from tensorflow import keras
import numpy as np


# Data preparation
mnist = keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

x_train=x_train[...,np.newaxis]
x_test=x_test[...,np.newaxis]

print(len(x_train), len(x_test))



# Model built with subclassing
class CustomModel(keras.Model):
    def __init__(self):
        super().__init__()
        self.conv1 = keras.layers.Conv2D(32, 3, activation = 'relu')
        self.conv2 = keras.layers.Conv2D(64, 3, activation = 'relu')
        self.flatten = keras.layers.Flatten()
        self.fc1 = keras.layers.Dense(128, activation = 'relu')
        self.fc2 = keras.layers.Dense(10, activation = 'softmax')

    def call(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.fc2(x)

        return x

model = CustomModel()


model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=1)

model.evaluate(x_test,  y_test, verbose=2)
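A subclassed model has no static graph; its weights only exist after the first call. If you want model.summary() before training, one option (a small sketch) is to run a single dummy batch through the model:

import tensorflow as tf

# One forward pass on a dummy batch creates the weights,
# after which summary() can report shapes and parameter counts.
_ = model(tf.zeros((1, 28, 28, 1)))
model.summary()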

%reset

 

CIFAR-100 dataset

 

import tensorflow as tf
from tensorflow import keras


# Data preparation
cifar100 = keras.datasets.cifar100

(x_train, y_train), (x_test, y_test) = cifar100.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print(len(x_train), len(x_test))


# Model built with subclassing
class CustomModel(keras.Model):
    def __init__(self):
        super().__init__()
        self.conv1 = keras.layers.Conv2D(16, 3, activation = 'relu')
        self.maxpool1 = keras.layers.MaxPool2D((2,2))
        self.conv2 = keras.layers.Conv2D(32, 3, activation = 'relu')
        self.maxpool2 = keras.layers.MaxPool2D((2,2))
        self.flatten = keras.layers.Flatten()
        self.fc1 = keras.layers.Dense(256, activation = 'relu')
        self.fc2 = keras.layers.Dense(100, activation = 'softmax')

    def call(self, x):
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.fc2(x)

        return x

model = CustomModel()


model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(x_train, y_train, epochs=1)

model.evaluate(x_test,  y_test, verbose=2)
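The main reason to reach for subclassing is the extra control it gives, for example pairing the model with a custom training loop. Below is a minimal sketch (my addition, not from the original post) that trains the CustomModel above for a few steps with tf.GradientTape, assuming x_train and y_train from the loading code:

loss_fn = keras.losses.SparseCategoricalCrossentropy()
optimizer = keras.optimizers.Adam()

# Batch the training data
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(1024).batch(64)

@tf.function
def train_step(images, labels):
    # Forward pass, loss, and one optimizer update
    with tf.GradientTape() as tape:
        preds = model(images, training=True)
        loss = loss_fn(labels, preds)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

for step, (images, labels) in enumerate(train_ds.take(100)):
    loss = train_step(images, labels)
    if step % 20 == 0:
        print(f'step {step}, loss {float(loss):.4f}')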

 
