From ef485bf5146b63f6beca2292c229cd0ddb353eb6 Mon Sep 17 00:00:00 2001
From: DmitriyAntonov
Date: Thu, 12 Oct 2023 21:20:23 +0400
Subject: [PATCH] =?UTF-8?q?=D1=80=D0=B5=D0=B0=D0=B4=D0=B82?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 antonov_dmitry_lab_7/for_test.py | 34 --------------------------------
 1 file changed, 34 deletions(-)
 delete mode 100644 antonov_dmitry_lab_7/for_test.py

diff --git a/antonov_dmitry_lab_7/for_test.py b/antonov_dmitry_lab_7/for_test.py
deleted file mode 100644
index c6b099e..0000000
--- a/antonov_dmitry_lab_7/for_test.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import numpy as np
-from keras.models import load_model
-from keras_preprocessing.sequence import pad_sequences
-from keras_preprocessing.text import Tokenizer
-
-from antonov_dmitry_lab_7.lab7 import tokenizer, max_sequence_length
-
-# Step 3: Load the pre-trained model
-model = load_model('my_model.h5') # Replace with the actual path to your model file
-
-# Recreate the Tokenizer and compile the model (in case the model was not compiled before saving)
-with open('small.txt', 'r') as file:
-    text = file.read()
-
-tokenizer = Tokenizer()
-tokenizer.fit_on_texts([text])
-total_words = len(tokenizer.word_index) + 1
-def generate_text(seed_text, next_words, model, max_sequence_length):
-    for _ in range(next_words):
-        token_list = tokenizer.texts_to_sequences([seed_text])[0]
-        token_list = pad_sequences([token_list], maxlen=max_sequence_length - 1, padding='pre')
-        predicted = np.argmax(model.predict(token_list), axis=-1)
-        output_word = ""
-        for word, index in tokenizer.word_index.items():
-            if index == predicted:
-                output_word = word
-                break
-        seed_text += " " + output_word
-    return seed_text
-
-
-# Generate text using the loaded model (same as before)
-generated_text = generate_text("Once upon a", 50, model, max_sequence_length)
-print(generated_text)