import array
import math
import random

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LinearRegression, Lasso, Ridge, Perceptron
from sklearn.metrics import accuracy_score

from flask import Flask

app = Flask(__name__)

@app.route("/")
def home():
    return "<html>" \
           "<h1>Жукова Алина ПИбд-41</h1>" \
           "<h1>Laboratory work No. 1</h1>" \
           "<table>" \
           "<td>" \
           "<form Action='http://127.0.0.1:5000/k4_1_task_1' Method=get>" \
           "<input type=submit value='Working with typical datasets and different models'>" \
           "</form>" \
           "</td>" \
           "</table>" \
           "</html>"


# Working with typical datasets and different models:
# generate the given type of data and compare 3 models on it.
# 10. Data: make_moons (noise=0.3, random_state=rs)
# Models:
# · Linear regression
# · Multilayer perceptron with 10 neurons in the hidden layer (alpha = 0.01)
# · Multilayer perceptron with 100 neurons in the hidden layer (alpha = 0.01)
@app.route("/k4_1_task_1", methods=['GET'])
|
|||
|
def k4_1_task_1():
|
|||
|
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
|
|||
|
random_state=0, n_clusters_per_class=1)
|
|||
|
rng = np.random.RandomState(2)
|
|||
|
X += 2 + rng.uniform(size=X.shape)
|
|||
|
linearly_dataset = (X, y)
|
|||
|
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
|
|||
|
cm_bright2 = ListedColormap(['#FF000066', '#0000FF66'])
|
|||
|
moon_dataset = make_moons(noise=0.3, random_state=0)
|
|||
|
circles_dataset = make_circles(noise=0.2, factor=0.5, random_state=1)
|
|||
|
datasets = [moon_dataset, circles_dataset, linearly_dataset]
|
|||
|
|
|||
|
X, y = moon_dataset
|
|||
|
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2, random_state=42)
|
|||
|
|
|||
|
ridge_regression = Ridge(alpha=3, random_state=240)
|
|||
|
ridge_regression.fit(X_train, y_train)
|
|||
|
linear_accuracy = str(ridge_regression.score(X_test, y_test))
|
|||
|
|
|||
|
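    # Added note: Ridge.score() reports the R^2 coefficient, not classification
    # accuracy. The line below is an illustrative sketch (the variable name is
    # ours, not part of the original lab code, and it is not shown in the page):
    # thresholding the regression output at 0.5 gives a comparable accuracy value.
    linear_thresholded_accuracy = accuracy_score(
        y_test, (ridge_regression.predict(X_test) >= 0.5).astype(int))
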
    plt.subplot(1, 3, 1)
    plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright2)

    # Approximate the regression's decision line: at the left and right edges of
    # the data, scan y values and keep the one whose prediction is closest to the
    # 0.5 class threshold, then connect the two points with a straight line.
    x_min = moon_dataset[0][:, 0].min()
    g_min = None
    y_min = None
    x_max = moon_dataset[0][:, 0].max()
    g_max = None
    y_max = None
    for k in range(-21, 50):
        elem = np.array([[x_min, k / 20]])
        getted = ridge_regression.predict(elem)[0]
        if g_min is None or math.fabs(0.5 - getted) < math.fabs(0.5 - g_min):
            g_min = getted
            y_min = elem[0][1]
        elif math.fabs(0.5 - getted) > math.fabs(0.5 - g_min):
            break

    for k in range(-21, 50):
        elem = np.array([[x_max, k / 20]])
        getted = ridge_regression.predict(elem)[0]
        if g_max is None or math.fabs(0.5 - getted) < math.fabs(0.5 - g_max):
            g_max = getted
            y_max = elem[0][1]
        elif math.fabs(0.5 - getted) > math.fabs(0.5 - g_max):
            break

    x = ridge_regression.predict(X_test)
    plt.plot([x_min, x_max], [y_min, y_max], label="line", color="yellow")
    # plt.show()

    # Multilayer perceptron with 10 neurons in the hidden layer
    perceptr = MLPClassifier(random_state=1, max_iter=2000, n_iter_no_change=20, activation="tanh",
                             alpha=0.01, hidden_layer_sizes=[10, ], tol=0.00000001)
    perceptr.fit(X_train, y_train)
    prediction = perceptr.predict(X_test)
    perceptron_accuracy = str(accuracy_score(y_test, prediction))
    prediction = perceptr.predict(moon_dataset[0])
    perceptron_accuracy_all = str(accuracy_score(moon_dataset[1], prediction))

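    # The block below approximates the MLP's decision boundary by brute force:
    # for each sample's x coordinate it scans y from -1.05 upwards, queries the
    # classifier at each grid point, and records approximate boundary points
    # (the last y where class 1 was predicted before switching back to class 0,
    # or the edge of the scan range); the sorted points are drawn as a polyline.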
    params_set = []
    y_elem = None
    g_elem = None

    for data_elem in moon_dataset[0]:
        for k in range(-21, 50):
            elem = np.array([[data_elem[0], k / 20]])
            getted = perceptr.predict(elem)[0]
            if g_elem is None and getted == 0:
                params_set.append([data_elem[0], -21 / 20])
                g_elem = None
            else:
                if getted == 1 and (getted == g_elem or g_elem is None):
                    g_elem = getted
                    y_elem = elem[0][1]
                else:
                    params_set.append([data_elem[0], y_elem])
                    g_elem = None
                    break

        # If the column stayed in class 1 up to the top of the scan, close it there.
        if g_elem is not None:
            params_set.append([data_elem[0], 50 / 20])
            g_elem = None

    params_set.sort()
    params_set = np.array(params_set)
    plt.subplot(1, 3, 2)
    plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright2)
    plt.plot(params_set[:, 0], params_set[:, 1], label="line", color="yellow")
    # plt.show()

    # Multilayer perceptron with 100 neurons in the hidden layer
    perceptr100 = MLPClassifier(random_state=1, max_iter=2000, n_iter_no_change=20, activation="tanh",
                                alpha=0.01, hidden_layer_sizes=[100, ], tol=0.00000001)
    perceptr100.fit(X_train, y_train)
    prediction = perceptr100.predict(X_test)
    perceptron100_accuracy = str(accuracy_score(y_test, prediction))
    prediction = perceptr100.predict(moon_dataset[0])
    perceptron100_accuracy_all = str(accuracy_score(moon_dataset[1], prediction))

    # Same column-by-column boundary scan for the 100-neuron model
    # (here y is scanned over a narrower range, up to 1.45).
    params_set = []
    y_elem = None
    g_elem = None

    for data_elem in moon_dataset[0]:
        for k in range(-21, 30):
            elem = np.array([[data_elem[0], k / 20]])
            getted = perceptr100.predict(elem)[0]
            if g_elem is None and getted == 0:
                params_set.append([data_elem[0], -21 / 20])
                g_elem = None
            else:
                if getted == 1 and (getted == g_elem or g_elem is None):
                    g_elem = getted
                    y_elem = elem[0][1]
                else:
                    params_set.append([data_elem[0], y_elem])
                    g_elem = None
                    break

        if g_elem is not None:
            params_set.append([data_elem[0], 30 / 20])
            g_elem = None

    params_set.sort()
    params_set = np.array(params_set)
    plt.subplot(1, 3, 3)
    plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright2)
    plt.plot(params_set[:, 0], params_set[:, 1], label="line", color="yellow")
    plt.show()

return "<html>" \
|
|||
|
"<h1>Работа с типовыми наборами данных и различными моделями</h1>" \
|
|||
|
"<h2>Вариант 10. Данные: make_moons (noise=0.3, random_state=rs)</h2>" \
|
|||
|
"<h2>Модели:\n 1) Линейная регрессия" \
|
|||
|
"\n 2) Многослойный персептрон с 10-ю нейронами в скрытом слое (alpha = 0.01)" \
|
|||
|
"\n 3) Многослойный персептрон со 100-а нейронами в скрытом слое (alpha = 0.01)</h2>" \
|
|||
|
"<h2>Оценка точности линейной регрессии: " + linear_accuracy + "</h2>" \
|
|||
|
"<h2>Оценка точности (тестовые данные) перцептрона 10 нейронов в скрытом слое: " + perceptron_accuracy + "</h2>" \
|
|||
|
"<h2>Оценка точности (тестовые данные) перцептрона 100 нейронов в скрытом слое: " + perceptron100_accuracy + "</h2>" \
|
|||
|
"<h2>Оценка точности (все точки) перцептрона 10 нейронов в скрытом слое: " + perceptron_accuracy_all + "</h2>" \
|
|||
|
"<h2>Оценка точности (все точки) перцептрона 100 нейронов в скрытом слое: " + perceptron100_accuracy_all + "</h2>" \
|
|||
|
"</html>"
|
|||
|
|
|||
|
|
|||
|
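# Added sketch, not called by the routes above: the decision boundaries in
# k4_1_task_1 are approximated by scanning columns of points. A more common
# alternative is to evaluate the classifier on a dense grid and draw filled
# contours. The helper name and parameters below are illustrative only, not
# part of the original lab code.
def plot_decision_regions(model, X, step=0.02):
    """Shade the model's predictions over a grid covering the data range."""
    x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
    y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, step),
                         np.arange(y_min, y_max, step))
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    plt.contourf(xx, yy, Z, alpha=0.3, cmap=ListedColormap(['#FF0000', '#0000FF']))

# Illustrative usage inside k4_1_task_1, before the scatter calls:
#     plot_decision_regions(perceptr, moon_dataset[0])

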
if __name__ == "__main__":
    app.run(debug=True)