add analize.py, increase batch size, fix to epoch 20 for image recognition
parent 6f93c696fd
commit bea3d90d87

analize2.py  +59  (new file)
@@ -0,0 +1,59 @@
#!/usr/bin/python3

from tensorflow.keras.preprocessing.image import ImageDataGenerator

img_size = (150, 150)
batch_size = 128
extract_path = "../ds/raw-img"

# Rescale pixels to [0, 1] and reserve 20% of the images for validation.
datagen = ImageDataGenerator(
        rescale=1.0/255,
        validation_split=0.2
)

def __dg(subset):
    return datagen.flow_from_directory(extract_path,
                                       target_size = img_size,
                                       batch_size = batch_size,
                                       class_mode = "categorical",
                                       subset = subset,
                                       shuffle = True)

train_generator = __dg("training")
val_generator = __dg("validation")


from tensorflow.keras import models as m
from tensorflow.keras import layers as l
from tensorflow.keras import optimizers as o

# AlexNet-style CNN for 10-class classification of 150x150 RGB images.
model = m.Sequential([
    l.Input(shape=(150, 150, 3)),
    l.Conv2D(96, (11, 11), strides=4, activation='relu'),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides=2),

    l.Conv2D(192, (5, 5), activation='relu', padding='same'),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides=2),

    l.Conv2D(256, (3, 3), activation='relu', padding='same'),
    l.Conv2D(256, (3, 3), activation='relu', padding='same'),
    l.Conv2D(160, (3, 3), activation='relu', padding='same'),
    l.BatchNormalization(),
    l.MaxPooling2D((3, 3), strides=2),

    l.Flatten(),
    l.Dense(1024, activation='relu'),
    l.Dropout(0.5),
    l.Dense(1024, activation='relu'),
    l.Dropout(0.5),
    l.Dense(10, activation='softmax'),
])

model.compile(optimizer = o.Adam(learning_rate = 0.0001),
              loss = 'categorical_crossentropy',
              metrics = ['accuracy'])

model.load_weights("w2.weights.h5")

# Use descriptive names here: reusing `l` would shadow the layers alias above.
loss, acc = model.evaluate(val_generator)
print(f"Loss: {loss}   Accuracy: {acc}")
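
Note on the new weights file: the commit message fixes training to 20 epochs, and find2.py below switches to ep20.weights.h5. A minimal sketch of the training run that could produce that file, reusing the model and generators from analize2.py above; the exact fit arguments are an assumption, not code from this commit:

# Hypothetical training run implied by the commit message, not part of this diff.
model.fit(train_generator,
          validation_data=val_generator,
          epochs=20)                       # "fix to epoch 20" per the commit message
model.save_weights("ep20.weights.h5")      # the file find2.py now loads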
							
								
								
									
find2.py  +1 -1
@@ -39,7 +39,7 @@ model.compile(optimizer = o.Adam(learning_rate = 0.0001),
               loss = "categorical_crossentropy",
               metrics = ["accuracy"])
 
-model.load_weights("w2.weights.h5")
+model.load_weights("ep20.weights.h5")
 
 if len(argv) >= 2:
     for i in argv[1:]:
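
The body of the argv loop is not shown in this diff. For illustration only, one plausible shape for it, preprocessing each image path `i` the same way as the training generators; the numpy/keras helpers below are an assumed implementation, not code from this commit:

# Hypothetical loop body for find2.py; `model` is the network defined earlier in the file.
from sys import argv
import numpy as np
from tensorflow.keras.preprocessing import image

for i in argv[1:]:
    img = image.load_img(i, target_size=(150, 150))   # match the training img_size
    x = image.img_to_array(img) / 255.0               # same rescaling as the generators
    pred = model.predict(np.expand_dims(x, 0))        # (1, 10) softmax output
    print(i, int(np.argmax(pred)))                    # index of the most likely class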