티스토리 뷰
1
2
3
4
5
6
7
8
9
10
|
# CPU 처리
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import matplotlib.pyplot as plt
from tensorflow.python.keras import datasets
from tensorflow import keras
|
cs |
1
2
3
4
5
6
7
8
9
10
11
12
13
14
|
# Training configuration and placeholders for the cats-vs-dogs pipeline.
# The None values are populated later by the dataset-download and
# generator-setup sections below.
batch_size = 128
epochs = 1 # kept at 1 to save time
IMG_HEIGHT = 150  # images are resized to 150x150 before entering the network
IMG_WIDTH = 150
train_dir = None            # set after the dataset archive is downloaded
validation_dir = None
train_cats_dir = None
train_dogs_dir = None
validation_cats_dir = None
validation_dogs_dir = None
train_data_gen = None       # set by the ImageDataGenerator section
total_train = None          # total image counts, derived from directory listings
total_val = None
val_data_gen = None
|
cs |
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
|
# Load MNIST and scale pixel values into [0, 1].
# NOTE(review): these MNIST arrays are never referenced by the cats-vs-dogs
# code below — presumably left over from an earlier exercise; confirm before
# removing, since load_data() also triggers a download.
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
test_images = test_images.reshape((10000, 28, 28, 1))
train_images, test_images = train_images / 255.0, test_images / 255.0

# Download and extract the filtered cats-and-dogs dataset, then resolve the
# per-split, per-class directory paths.
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')

train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')

# Count the images in each class directory to size the training loop.
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val

# Report the dataset breakdown.
for _label, _count in (
    ('total training cat images:', num_cats_tr),
    ('total training dog images:', num_dogs_tr),
    ('total validation cat images:', num_cats_val),
    ('total validation dog images:', num_dogs_val),
):
    print(_label, _count)
print("--")
print("Total training images:", total_train)
print("Total validation images:", total_val)
|
cs |
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip
68608000/68606236 [==============================] - 0s 0us/step
total training cat images: 1000
total training dog images: 1000
total validation cat images: 500
total validation dog images: 500
--
Total training images: 2000
Total validation images: 1000
1
2
3
4
5
|
# Plain 1/255 rescaling generators — no augmentation — for both splits.
train_image_generator = ImageDataGenerator(rescale=1. / 255)
validation_image_generator = ImageDataGenerator(rescale=1. / 255)

# Shared flow_from_directory settings; only the directory (and shuffling on
# the training split) differ between the two generators.
_flow_kwargs = dict(batch_size=batch_size,
                    target_size=(IMG_HEIGHT, IMG_WIDTH),
                    class_mode='binary')
train_data_gen = train_image_generator.flow_from_directory(
    directory=train_dir, shuffle=True, **_flow_kwargs)
val_data_gen = validation_image_generator.flow_from_directory(
    directory=validation_dir, **_flow_kwargs)

# Pull one batch so sample images are available; labels are discarded.
sample_training_images, _ = next(train_data_gen)
|
cs |
Found 2000 images belonging to 2 classes.
Found 1000 images belonging to 2 classes.
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
|
# Three conv/max-pool stages feeding a dense head; single sigmoid unit for
# the binary cat-vs-dog decision.
model = Sequential()
model.add(Conv2D(16, 3, padding='same', activation='relu',
                 input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)))
model.add(MaxPooling2D())
model.add(Conv2D(32, 3, padding='same', activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(64, 3, padding='same', activation='relu'))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(1, activation='sigmoid'))

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

print('---------- MODEL SUMMARY -------------')
print(model.summary())

# NOTE(review): the model is saved here *before* any training — train_model()
# below reloads this file and fits it.
model.save('cats_and_dogs.h5')
print('======= 모델 훈련 종료 ======')
|
cs |
1
2
3
4
5
6
7
8
9
10
|
def train_model():
    """Reload the CNN saved in 'cats_and_dogs.h5' and fit it.

    Relies on the module-level ``train_data_gen`` / ``val_data_gen``
    generators and the ``total_train`` / ``total_val`` / ``batch_size`` /
    ``epochs`` configuration being set up by the preceding sections.

    Returns:
        The Keras ``History`` object produced by training.
    """
    print('케라스에서 모델 호출')
    model = keras.models.load_model('cats_and_dogs.h5')
    # NOTE(review): fit_generator is deprecated in TF2 — model.fit accepts
    # generators directly; kept as-is because this file pins v1 behavior.
    history = model.fit_generator(
        train_data_gen,
        steps_per_epoch=total_train // batch_size,
        # Fix: use the module-level `epochs` setting (currently 1) instead of
        # a hard-coded 1, so changing the config actually changes training.
        epochs=epochs,
        validation_data=val_data_gen,
        validation_steps=total_val // batch_size,
    )
    return history
|
cs |
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
|
# Train the model and plot accuracy/loss curves for train vs. validation.
history = train_model()

# Fix: TF1-style Keras records accuracy under 'acc'/'val_acc', while TF2 uses
# 'accuracy'/'val_accuracy' — accept either so the plot survives an upgrade.
hist = history.history
acc = hist['acc'] if 'acc' in hist else hist['accuracy']
val_acc = hist['val_acc'] if 'val_acc' in hist else hist['val_accuracy']
loss = hist['loss']
val_loss = hist['val_loss']

# Fix: one x-axis point per epoch actually trained, instead of a hard-coded
# range(1) that silently breaks if `epochs` is ever raised.
epochs_range = range(len(loss))

plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
|
cs |
'5. 파이썬' 카테고리의 다른 글
[Telaviv] 코랩(colab) 사용서 (0) | 2020.05.29 |
---|---|
텐서플로/2020-05/텐진/ cifar10_model.ipynb (0) | 2020.05.29 |
[파이썬] 인공지능 개발환경 아나콘다(Anaconda) , 파이참(Pycharm) 설치하기 (0) | 2020.05.24 |
[파이썬 NL] 한글 자연어 처리기 Komoran, Hannanum, Kkma, Okt 성능 비교 (사이트 링크) (0) | 2020.05.24 |
파이썬/자연어/2020-05-24/ NLTK를 활용한 제인 오스틴의 엠마 분석 (0) | 2020.05.24 |
댓글
공지사항
최근에 올라온 글
최근에 달린 댓글
- Total
- Today
- Yesterday
링크
TAG
- docker
- COLAB
- JUnit
- jQuery
- Git
- Mlearn
- intellij
- Eclipse
- Mongo
- Java
- Django
- Algorithm
- AWS
- SQLAlchemy
- JPA
- SpringBoot
- mariadb
- Python
- tensorflow
- springMVC
- vscode
- FLASK
- terms
- nodejs
- ERD
- maven
- React
- database
- KAFKA
- Oracle
일 | 월 | 화 | 수 | 목 | 금 | 토 |
---|---|---|---|---|---|---|
1 | 2 | 3 | 4 | |||
5 | 6 | 7 | 8 | 9 | 10 | 11 |
12 | 13 | 14 | 15 | 16 | 17 | 18 |
19 | 20 | 21 | 22 | 23 | 24 | 25 |
26 | 27 | 28 | 29 | 30 | 31 |
글 보관함