74 lines
2.8 KiB
Python
74 lines
2.8 KiB
Python
import pandas as pd
|
|
from sklearn.model_selection import train_test_split, GridSearchCV
|
|
from sklearn.ensemble import RandomForestRegressor
|
|
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
|
|
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
|
|
import matplotlib.pyplot as plt
|
|
import joblib
|
|
import numpy as np
|
|
|
|
# Load the dataset
df = pd.read_csv('../../../../datasets/synthetic_tvs.csv')

# Validate that every required column is present before any processing
required_columns = ['display', 'tuners', 'features', 'os', 'power_of_volume', 'color', 'screen_size', 'price']
missing_columns = [col for col in required_columns if col not in df.columns]
if missing_columns:
    # ValueError is more precise than bare Exception and is still caught
    # by any existing `except Exception` handler.
    raise ValueError(f"Отсутствуют столбцы: {missing_columns}")

# Drop rows with missing values in any required column
df = df.dropna(subset=required_columns)

# One-hot encode categorical variables (drop_first avoids the dummy trap)
categorical_features = ['display', 'tuners', 'features', 'os', 'power_of_volume', 'color']
df = pd.get_dummies(df, columns=categorical_features, drop_first=True)

# Split into feature matrix and target
X = df.drop('price', axis=1)
y = df['price']

# Split BEFORE fitting any transformers so test-set statistics never leak
# into the fitted preprocessing (the original fit poly/scaler on the full
# dataset prior to splitting — a data-leakage bug).
X_train_raw, X_test_raw, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=42
)

# Polynomial features.
# NOTE(review): degree=1 with interaction_only=True is an identity
# transform — no interaction terms are generated at degree 1. Kept as-is
# to preserve the saved-artifact pipeline; raise degree to 2 if
# interaction features were actually intended.
poly = PolynomialFeatures(degree=1, interaction_only=True, include_bias=False)
X_train_poly = poly.fit_transform(X_train_raw)
X_test_poly = poly.transform(X_test_raw)

# Standardize: fit on the training split only, then apply to both splits.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train_poly)
X_test = scaler.transform(X_test_poly)

# Hyperparameter search space for the Random Forest regressor
param_grid = {
    'n_estimators': [100, 200],
    'max_depth': [10, 20],
    'max_features': ['sqrt', 'log2', 0.5],
    'min_samples_split': [5, 10],
    'min_samples_leaf': [2, 4],
}

# 3-fold CV grid search optimizing (negated) MAE
grid_search = GridSearchCV(
    RandomForestRegressor(random_state=42),
    param_grid,
    cv=3,
    scoring='neg_mean_absolute_error',
)
grid_search.fit(X_train, y_train)
best_model = grid_search.best_estimator_

# Evaluate on the held-out test split (these metrics were imported in the
# original script but never actually used).
y_pred = best_model.predict(X_test)
print(f"MAE: {mean_absolute_error(y_test, y_pred):.2f}")
print(f"RMSE: {np.sqrt(mean_squared_error(y_test, y_pred)):.2f}")
print(f"R2: {r2_score(y_test, y_pred):.3f}")

# Feature importances from the best estimator
feature_importances = best_model.feature_importances_
feature_names = poly.get_feature_names_out(X.columns)

# Plot the 20 most important features, largest at the top
sorted_indices = np.argsort(feature_importances)[::-1]
plt.figure(figsize=(10, 8))
plt.barh([feature_names[i] for i in sorted_indices[:20]], feature_importances[sorted_indices[:20]])
plt.xlabel('Importance')
plt.ylabel('Feature')
plt.title('Top 20 Feature Importances')
plt.gca().invert_yaxis()  # barh plots bottom-up; flip so rank 1 is on top
plt.show()

# Persist the model together with every fitted preprocessing artifact so
# inference can reproduce the exact training-time transformations.
feature_columns = X.columns.tolist()
joblib.dump(feature_columns, '../../tvML/feature_columns.pkl')
joblib.dump(best_model, '../../tvML/tv_price_model.pkl')
joblib.dump(poly, '../../tvML/poly_transformer.pkl')
joblib.dump(scaler, '../../tvML/scaler.pkl')
print("Модель для телевизоров сохранена.")
|