initial commit
commit 6f93c696fd

find2.py (new file, 54 lines)
@@ -0,0 +1,54 @@
#!/usr/bin/python3

img_size = (150, 150)

from tensorflow.keras import models as m
from tensorflow.keras import layers as l
from tensorflow.keras import optimizers as o
from PIL import Image
from sys import argv
from os import listdir as ls
import numpy as np

model = m.Sequential([
    l.Input(shape = (*img_size, 3)),

    l.Conv2D(96, (11, 11), strides = 4, activation = "relu"),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides = 2),

    l.Conv2D(192, (5, 5), activation = "relu", padding = "same"),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides = 2),

    l.Conv2D(256, (3, 3), activation = "relu", padding = "same"),
    l.Conv2D(256, (3, 3), activation = "relu", padding = "same"),
    l.Conv2D(160, (3, 3), activation = "relu", padding = "same"),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides = 2),

    l.Flatten(),
    l.Dense(1024, activation = "relu"),
    l.Dropout(0.5),
    l.Dense(1024, activation = "relu"),
    l.Dropout(0.5),
    l.Dense(10, activation = "softmax"),
])

model.compile(optimizer = o.Adam(learning_rate = 0.0001),
              loss = "categorical_crossentropy",
              metrics = ["accuracy"])

model.load_weights("w2.weights.h5")

if len(argv) >= 2:
    for i in argv[1:]:
        with Image.open(i) as im:
            im = im.resize((150, 150), Image.Resampling.LANCZOS)
            im = np.divide(np.array(im),
                           np.array(255.))

            res = model.predict(np.array([im]))
            print(res)
            print(np.argmax(res))
            print(sorted(ls("../ds/raw-img"))[np.argmax(res)])
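Note on the final lookup in find2.py: sorted(ls("../ds/raw-img"))[np.argmax(res)] assumes that the class folders, sorted alphabetically, line up with the index order used during training. Keras' flow_from_directory (used by test1.py and test2.py below) does assign class indices in alphabetical order, so the assumption should hold. A minimal sketch to double-check the mapping (helper code for illustration only, not part of the commit):

    # Sketch: confirm that the alphabetical folder order used by find2.py
    # matches the class_indices assigned by flow_from_directory.
    from os import listdir
    from tensorflow.keras.preprocessing.image import ImageDataGenerator

    gen = ImageDataGenerator(rescale=1.0 / 255).flow_from_directory(
        "../ds/raw-img", target_size=(150, 150), class_mode="categorical")
    by_index = sorted(gen.class_indices, key=gen.class_indices.get)
    assert by_index == sorted(listdir("../ds/raw-img"))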
test1.py (new file, 62 lines)
@@ -0,0 +1,62 @@
#!/usr/bin/python3

from tensorflow.keras.preprocessing.image import ImageDataGenerator

img_size = (227, 227)
batch_size = 128
extract_path="../ds/raw-img"

datagen = ImageDataGenerator(
    rescale=1.0/255,
    validation_split=0.2
)

def __dg(subset):
    return datagen.flow_from_directory(extract_path,
                                       target_size = img_size,
                                       batch_size = batch_size,
                                       class_mode = "categorical",
                                       subset = subset,
                                       shuffle = True)

train_generator = __dg("training")
val_generator = __dg("validation")


from tensorflow.keras import models as m
from tensorflow.keras import layers as l
from tensorflow.keras import optimizers as o

model = m.Sequential([
    l.Input(shape=(227, 227, 3)),
    l.Conv2D(96, (11, 11), strides=4, activation='relu'),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides=2),

    l.Conv2D(256, (5, 5), activation='relu', padding='same'),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides=2),
    l.Conv2D(384, (3, 3), activation='relu', padding='same'),
    l.Conv2D(384, (3, 3), activation='relu', padding='same'),
    l.Conv2D(256, (3, 3), activation='relu', padding='same'),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides=2),
    l.Flatten(),
    l.Dense(4096, activation='relu'),
    l.Dropout(0.5),
    l.Dense(4096, activation='relu'),
    l.Dropout(0.5),
    l.Dense(10, activation='softmax'),
])

model.compile(optimizer = o.Adam(learning_rate = 0.0001),
              loss = 'categorical_crossentropy',
              metrics = ['accuracy'])

print(model.summary())

model.fit(train_generator,
          epochs = 3,
          validation_data = val_generator)

model.save_weights("w1.weights.h5")
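test1.py trains the 227x227 AlexNet-style network from scratch for 3 epochs and writes w1.weights.h5. One caveat: ImageDataGenerator is deprecated in recent TensorFlow releases. A rough sketch of the same rescale plus 80/20 split using tf.keras.utils.image_dataset_from_directory (an alternative, not what this commit uses; the seed value is arbitrary):

    # Sketch of an equivalent input pipeline on newer TF versions.
    import tensorflow as tf

    train_ds = tf.keras.utils.image_dataset_from_directory(
        "../ds/raw-img", validation_split=0.2, subset="training", seed=1,
        image_size=(227, 227), batch_size=128, label_mode="categorical",
    ).map(lambda x, y: (x / 255.0, y))  # same 1/255 rescale as the generator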
test2.py (new file, 64 lines)
@@ -0,0 +1,64 @@
#!/usr/bin/python3

from tensorflow.keras.preprocessing.image import ImageDataGenerator

img_size = (150, 150)
batch_size = 128
extract_path="../ds/raw-img"

datagen = ImageDataGenerator(
    rescale=1.0/255,
    validation_split=0.2
)

def __dg(subset):
    return datagen.flow_from_directory(extract_path,
                                       target_size = img_size,
                                       batch_size = batch_size,
                                       class_mode = "categorical",
                                       subset = subset,
                                       shuffle = True)

train_generator = __dg("training")
val_generator = __dg("validation")


from tensorflow.keras import models as m
from tensorflow.keras import layers as l
from tensorflow.keras import optimizers as o

model = m.Sequential([
    l.Input(shape=(150, 150, 3)),
    l.Conv2D(96, (11, 11), strides=4, activation='relu'),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides=2),

    l.Conv2D(192, (5, 5), activation='relu', padding='same'),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides=2),
    l.Conv2D(256, (3, 3), activation='relu', padding='same'),
    l.Conv2D(256, (3, 3), activation='relu', padding='same'),
    l.Conv2D(160, (3, 3), activation='relu', padding='same'),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides=2),
    l.Flatten(),
    l.Dense(1024, activation='relu'),
    l.Dropout(0.5),
    l.Dense(1024, activation='relu'),
    l.Dropout(0.5),
    l.Dense(10, activation='softmax'),
])

model.compile(optimizer = o.Adam(learning_rate = 0.0001),
              loss = 'categorical_crossentropy',
              metrics = ['accuracy'])

print(model.summary())

model.load_weights("w2.weights.h5")

model.fit(train_generator,
          epochs = 10,
          validation_data = val_generator)

model.save_weights("w2.weights.h5")
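test2.py resumes from w2.weights.h5, fine-tunes the 150x150 model for 10 epochs, and only overwrites the weights file once fit returns, so an interrupted run loses all of its epochs. A hedged sketch of adding a ModelCheckpoint callback (an assumed addition, not in the committed script) that would keep the best weights seen so far:

    # Sketch: save the best validation-accuracy weights after each epoch
    # instead of only at the end of training.
    from tensorflow.keras.callbacks import ModelCheckpoint

    ckpt = ModelCheckpoint("w2.weights.h5", save_weights_only=True,
                           save_best_only=True, monitor="val_accuracy")
    model.fit(train_generator, epochs=10,
              validation_data=val_generator, callbacks=[ckpt])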