# neuro-lab7/main.py
# 2025-12-06 15:56:00 +02:00
#
# 67 lines
# 1.5 KiB
# Python

from tensorflow.keras import layers as kl
from tensorflow.keras import models as km
from tensorflow.keras import losses as ks
from tensorflow.keras import optimizers as ko
from tensorflow.keras import callbacks as kc
from tensorflow.keras.preprocessing.text import Tokenizer as kT
from tensorflow.keras.utils import pad_sequences as kps
import numpy as np
import pandas as pd
print("I")
# Load the prepared training data. header=None + explicit names: the CSV has
# no header row; columns are presumably id ('i'), class label ('c'), and
# review text ('r') — TODO confirm against the preprocessing step.
frame = pd.read_csv("prepped_train.csv",
                    header=None,
                    names=['i', 'c', 'r'])
print("R")
y = frame['c']
# astype(str) guards texts_to_sequences against non-string cells
# (NaN becomes the literal string "nan" — same as the original behavior).
reviews = frame['r'].astype(str)
# Build a vocabulary restricted to the 6000 most frequent tokens.
tokenizer = kT(num_words=6000)
tokenizer.fit_on_texts(reviews)
print("F")
sequences = tokenizer.texts_to_sequences(reviews)
print("T")
# Pad/truncate every sequence to a fixed length of 100 token ids.
ts = kps(sequences, maxlen=100)
print("P")
# NOTE(review): an earlier inline Sequential model (Embedding -> Conv1D ->
# stacked LSTMs -> Dense sigmoid, compiled with Lion + binary_crossentropy)
# was kept here inside a triple-quoted string. That string was an executed
# no-op expression at runtime and duplicated the model now imported from
# model.py below, so the dead code has been removed.
from model import m

# Persist only checkpoints that improve held-out accuracy.
checkpoint = kc.ModelCheckpoint(
    'model2.keras',
    monitor='val_accuracy',
    save_best_only=True,
    verbose=1,
)

# Warm-start from the weights saved by the previous run.
m.load_weights("model1.keras")

# NOTE(review): Keras takes the validation_split slice from the END of the
# data before shuffling — confirm the CSV rows are not ordered by label.
history = m.fit(
    ts,
    y,
    epochs=15,
    batch_size=1024,
    validation_split=0.1,
    callbacks=[checkpoint],
)