# IIS_2023_1/mashkova_margarita_lab_4/main.py
# NOTE(review): web-page chrome from the original hosting view removed so the
# file parses as Python; the "ambiguous Unicode" warning referred to the
# Cyrillic text in comments and print strings below.

import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import normalize
from collections import Counter
filename = "mobiles.csv"
# Read the dataset into a DataFrame (comma-separated).
data = pd.read_csv(filename, sep=',')
# Drop the identifier column — it carries no clustering signal.
data.pop("Id")
# Collapse each free-text feature column to a single numeric value per row:
# TF-IDF vectorize the column, then sum the per-row TF-IDF weights.
FEATURE_COLUMNS_TO_PROC = ['Name', 'Brand', 'Model', 'Operating system']
for column_name in FEATURE_COLUMNS_TO_PROC:
    vectorizer = TfidfVectorizer()
    tfidf_matrix = pd.DataFrame(
        vectorizer.fit_transform(data[column_name]).toarray()
    )
    # Row-wise sum over TF-IDF columns, skipping column 0 as the original
    # code did — NOTE(review): the [1:] slice drops the first vocabulary
    # term from every sum; confirm this is intentional.
    data[column_name] = tfidf_matrix[tfidf_matrix.columns[1:]].sum(axis=1)
# Приведение строковых значений к численным при помощи числового кодирования LabelEncoder
# Encode categorical yes/no-style columns as integers. A single LabelEncoder
# instance is reused: each fit_transform call refits it on that column alone,
# exactly as six separate calls would.
le = LabelEncoder()
for categorical_column in ('Touchscreen', 'Wi-Fi', 'Bluetooth', 'GPS',
                           '3G', '4G/ LTE'):
    data[categorical_column] = le.fit_transform(data[categorical_column])
# Standardize every feature to zero mean / unit variance ...
scaler = StandardScaler()
X_scaled = scaler.fit_transform(data)
# ... then L2-normalize each row of the scaled matrix.
X_normalized = pd.DataFrame(normalize(X_scaled))
# Project onto the first two principal components so the clustering can be
# run and plotted in 2-D.
pca = PCA(n_components=2)
X_principal = pd.DataFrame(pca.fit_transform(X_normalized),
                           columns=['P1', 'P2'])
# Define and fit the DBSCAN model on the 2-D PCA projection.
dbscan = DBSCAN(eps=0.05, min_samples=5).fit(X_principal)
labels = dbscan.labels_
# Number of clusters found, excluding the noise label (-1).
N_clus = len(set(labels)) - (1 if -1 in labels else 0)
print('Количество получившихся кластеров: %d' % N_clus)
# Per-cluster element counts, largest clusters first (noise appears as -1).
counter = Counter(labels)
clusters_df = pd.DataFrame({'Номер кластера': counter.keys(), 'Кол-во элементов': counter.values()}) \
    .sort_values(by='Кол-во элементов', ascending=False)
print(clusters_df.reset_index(drop=True))
# Number of noise points (DBSCAN labels them -1).
n_noise = list(labels).count(-1)
print('Количество шумовых точек: %d' % n_noise)
# Clustering quality via the mean silhouette coefficient.
# silhouette_score raises ValueError unless there are at least 2 distinct
# labels, so guard against degenerate results (e.g. everything is noise).
if len(set(labels)) > 1:
    silhouette_avg = silhouette_score(X_principal, labels)
    print("Silhouette Score:", silhouette_avg)
else:
    print("Silhouette Score: undefined (fewer than 2 distinct labels)")
# Visualize the result: points in PCA space, colored by DBSCAN cluster label.
xs = X_principal['P1']
ys = X_principal['P2']
plt.scatter(xs, ys, c=labels)
plt.savefig('dbscan_plot.png')
# plt.show()