IIS_2023_1/podkorytova_yulia_lab_7/lr7.py

import numpy as np
from tensorflow import keras
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences


# Prepare the text, then build and train the model
def train_model(file_path, epochs):
    # read the training data from the file
    with open(file_path, encoding='utf-8') as f:
        data = f.read()
    # create the tokenizer and fit it on the text
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts([data])
    # convert the text into sequences of word indices
    sequences = tokenizer.texts_to_sequences([data])
    # build the training data: every n-gram prefix of the text
    input_sequences = []
    for sequence in sequences:
        for i in range(1, len(sequence)):
            n_gram_sequence = sequence[:i + 1]
            input_sequences.append(n_gram_sequence)
    # pad the sequences so they all have the same length
    max_sequence_len = max([len(sequence) for sequence in input_sequences])
    input_sequences = pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre')
    # split into inputs (all tokens but the last) and targets (the last token)
    x = input_sequences[:, :-1]
    y = input_sequences[:, -1]
    # build the recurrent neural network
    model = keras.Sequential()
    model.add(keras.layers.Embedding(len(tokenizer.word_index) + 1, 100, input_length=max_sequence_len - 1))
    model.add(keras.layers.Dropout(0.2))
    model.add(keras.layers.LSTM(150))
    model.add(keras.layers.Dense(len(tokenizer.word_index) + 1, activation='softmax'))
    # compile and train the model
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(x, y, epochs=epochs, verbose=1)
    return model, tokenizer, max_sequence_len


# Generate text word by word from a seed string
def generate_text(model, tokenizer, max_sequence_len, seed_text, next_words):
    for _ in range(next_words):
        # encode the current text and pad it to the model's input length
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        token_list = pad_sequences([token_list], maxlen=max_sequence_len - 1, padding='pre')
        # predict the most probable next word
        predicted = model.predict(token_list, verbose=0)
        predict_index = int(np.argmax(predicted, axis=-1)[0])
        word = tokenizer.index_word.get(predict_index)
        # stop if the predicted index has no corresponding word (e.g. the padding index 0)
        if word is None:
            break
        seed_text += " " + word
    return seed_text


# Russian text
model, tokenizer, max_sequence_len = train_model('rus.txt', 150)
rus_text = generate_text(model, tokenizer, max_sequence_len, "Зима", 25)
print(rus_text)

# English text
model, tokenizer, max_sequence_len = train_model('eng.txt', 150)
eng_text = generate_text(model, tokenizer, max_sequence_len, "Winter", 25)
print(eng_text)
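
# Optional: persist the trained model and tokenizer for later reuse.
# A minimal sketch; the file names 'eng_model.h5' and 'eng_tokenizer.pkl' are assumptions.
import pickle

model.save('eng_model.h5')  # saves the architecture and the trained weights (HDF5 format)
with open('eng_tokenizer.pkl', 'wb') as f:
    pickle.dump(tokenizer, f)  # the tokenizer is needed to map words to indices when generating later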