"""MNIST digit-classification helpers: data preparation, training, evaluation metrics and inference."""

import tensorflow as tf
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image


def __prep_data():
    """Load MNIST, scale pixels to [0, 1], flatten images and one-hot encode labels."""
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    # Scale pixel values from [0, 255] to [0, 1].
    x_train = x_train.astype("float32") / 255.
    x_test = x_test.astype("float32") / 255.

    # Flatten each 28x28 image into a 784-element vector.
    x_train = x_train.reshape(-1, 784)
    x_test = x_test.reshape(-1, 784)

    # One-hot encode the digit labels (10 classes).
    y_train = tf.keras.utils.to_categorical(y_train, 10)
    y_test = tf.keras.utils.to_categorical(y_test, 10)

    return (x_train, y_train), (x_test, y_test)


def __prep_conf_matr(m):
    """Build a 10x10 confusion matrix (rows = predicted class, columns = true class)."""
    output_matrix = np.zeros([10, 10])

    (_, _), (x_test, y_test) = __prep_data()
    pred = m.predict(x_test)

    for i, v in enumerate(pred):
        output_matrix[np.argmax(v)][np.argmax(y_test[i])] += 1

    return output_matrix


def __plot_conf_matr(m):
    """Render the confusion matrix as a heat map with per-cell counts."""
    matr = __prep_conf_matr(m)

    _, ax = plt.subplots()

    ax.matshow(matr, cmap = plt.cm.Blues)

    # ax.text takes (x, y) = (column, row), so the count for row i, column j
    # is drawn at position (j, i).
    for i, x in enumerate(matr):
        for j, y in enumerate(x):
            ax.text(j,
                    i,
                    str(round(y)),
                    va = "center",
                    ha = "center")

    plt.show()


def __conf_matr_to_binary(index, matr):
    """Collapse the 10x10 matrix into a 2x2 one-vs-rest matrix for class `index`.

    Layout: bm[0, 0] = TP, bm[0, 1] = FP, bm[1, 0] = FN, bm[1, 1] = TN.
    """
    bm = np.zeros([2, 2])

    for i, x in enumerate(matr):
        for j, y in enumerate(x):
            bm[int(index != i), int(index != j)] += y

    return bm


def __calc_accuracy_for(index, bcm):
    # (TP + TN) / (TP + FP + FN + TN)
    return (bcm[0,0] + bcm[1,1]) / (bcm[0,0] + bcm[0,1] + bcm[1,0] + bcm[1,1])


def __calc_precision_for(index, bcm):
    # TP / (TP + FP)
    return (bcm[0,0]) / (bcm[0,0] + bcm[0,1])


def __calc_recall_for(index, bcm):
    # TP / (TP + FN)
    return (bcm[0,0]) / (bcm[0,0] + bcm[1,0])


def __calc_specificity_for(index, bcm):
    # TN / (TN + FP)
    return (bcm[1,1]) / (bcm[0,1] + bcm[1,1])


def __calc_f1_score_for(index, bcm):
    # Harmonic mean of precision and recall.
    p = __calc_precision_for(index, bcm)
    r = __calc_recall_for(index, bcm)

    return 2 * p * r / (p + r)


def __plot_acc_rate(h):
    plt.plot(h.history['accuracy'], label = 'train_acc')
    plt.plot(h.history['val_accuracy'], label = 'valid_acc')

    plt.legend()

    plt.show()


def train(m, label):
    """Train `m` on MNIST, save its weights under `label` and plot the accuracy curves."""
    (x_train, y_train), (x_test, y_test) = __prep_data()

    m.compile(optimizer = "adam",
              loss = "categorical_crossentropy",
              metrics = ["accuracy"])

    h = m.fit(x_train,
              y_train,
              epochs = 30,
              batch_size = 512,
              validation_data = (x_test, y_test))

    m.save_weights(f"save-{label}.weights.h5")

    __plot_acc_rate(h)


def model_quality(m, label):
    """Load saved weights and report per-digit accuracy, precision, recall, F1 and specificity."""
    (_, _), (x_test, y_test) = __prep_data()

    m.compile(optimizer = "adam",
              loss = "categorical_crossentropy",
              metrics = ["accuracy"])

    m.load_weights(f"save-{label}.weights.h5")

    __plot_conf_matr(m)

    cm = __prep_conf_matr(m)

    for i in range(10):
        bcm = __conf_matr_to_binary(i, cm)

        acc = __calc_accuracy_for(i, bcm)
        pre = __calc_precision_for(i, bcm)
        rec = __calc_recall_for(i, bcm)
        f1s = __calc_f1_score_for(i, bcm)
        spe = __calc_specificity_for(i, bcm)

        print(f"{i}: acc={acc} pre={pre} rec={rec} f1s={f1s} spe={spe}")


def classify(m, label, imgfn):
    """Load saved weights, classify a single image file and display the prediction."""
    m.compile(optimizer = "adam",
              loss = "categorical_crossentropy",
              metrics = ["accuracy"])

    m.load_weights(f"save-{label}.weights.h5")

    # Match the training preprocessing: 28x28 grayscale, scaled to [0, 1], flattened to 784.
    img = Image.open(imgfn).convert("L").resize((28, 28))
    flat_img = np.array(img).astype("float32").reshape(-1, 784) / 255.

    res = m.predict(flat_img)

    plt.imshow(flat_img.reshape(28, 28),
               cmap = "gray")

    plt.title(np.argmax(res))

    plt.show()


def classify_live(m, label):
    import lv

    lv.classify_live(m, label)
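

# Example usage: a minimal sketch of how these helpers fit together. The model
# architecture below is only an assumption for illustration (any Keras model
# taking 784 inputs and producing 10 softmax outputs works), and "digit.png"
# is a hypothetical image path.
if __name__ == "__main__":
    model = tf.keras.Sequential([
        tf.keras.Input(shape = (784,)),
        tf.keras.layers.Dense(128, activation = "relu"),
        tf.keras.layers.Dense(10, activation = "softmax"),
    ])

    train(model, "dense128")            # writes save-dense128.weights.h5
    model_quality(model, "dense128")    # confusion matrix plus per-digit metrics
    classify(model, "dense128", "digit.png")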