import numpy as np
from keras.models import load_model
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer

# The tokenizer is rebuilt from the corpus below, so only the shared
# sequence length needs to be imported from the training script
from antonov_dmitry_lab_7.lab7 import max_sequence_length

# Step 3: Load the pre-trained model
model = load_model('my_model.h5')  # Replace with the actual path to your model file

# Recreate the Tokenizer and compile the model (in case the model was not compiled before saving)
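# load_model normally restores the compile state; this minimal sketch covers a
# model saved uncompiled (assumption: lab 7 trained with Adam and categorical
# cross-entropy, as is typical for a next-word model like this one)
model.compile(optimizer='adam', loss='categorical_crossentropy')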
with open('small.txt', 'r') as file:
    text = file.read()

tokenizer = Tokenizer()
tokenizer.fit_on_texts([text])  # must be the same corpus used in training, or word indices will not match
total_words = len(tokenizer.word_index) + 1  # +1 because index 0 is reserved for padding
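
# Sanity-check sketch (assumes the model ends in a Dense softmax over the
# vocabulary): the rebuilt vocabulary must match the size the model was trained on
assert model.output_shape[-1] == total_words, "tokenizer vocabulary does not match the model"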


def generate_text(seed_text, next_words, model, max_sequence_length):
    """Greedily generate next_words words, one model prediction at a time."""
    for _ in range(next_words):
        # Encode the running text and pad it to the length the model expects
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        token_list = pad_sequences([token_list], maxlen=max_sequence_length - 1, padding='pre')
        # Take the highest-probability class as the next word (greedy decoding)
        predicted = np.argmax(model.predict(token_list), axis=-1)[0]
        output_word = ""
        for word, index in tokenizer.word_index.items():
            if index == predicted:
                output_word = word
                break
        seed_text += " " + output_word
    return seed_text
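
# Design note: the Tokenizer also exposes the reverse mapping directly, so the
# linear scan above could be replaced by a constant-time lookup:
#     output_word = tokenizer.index_word.get(predicted, "")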

# Generate text using the loaded model (same as before)
generated_text = generate_text("Once upon a", 50, model, max_sequence_length)
print(generated_text)