import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

# read the dataset
data = pd.read_csv("titanic_data.csv")

# feature columns ('SibSp' assumes the standard Kaggle Titanic column name)
features = ['Sex', 'Age', 'SibSp']

# target variable: whether the passenger survived
target = 'Survived'

# encode the categorical 'Sex' column and fill missing ages so the tree can be fitted
# (assumption: the usual Kaggle schema, where 'Sex' holds 'male'/'female' and 'Age' has NaNs)
data['Sex'] = data['Sex'].map({'male': 0, 'female': 1})
data['Age'] = data['Age'].fillna(data['Age'].median())
# split the data into training and test sets
train_data, test_data, train_labels, test_labels = train_test_split(
    data[features],
    data[target],
    test_size=0.2,
    random_state=42
)

# create a decision tree classifier
model = DecisionTreeClassifier()

# train the model
model.fit(train_data, train_labels)

# predict on the test set to evaluate accuracy
predictions = model.predict(test_data)

# compute the model's accuracy
accuracy = accuracy_score(test_labels, predictions)
print("model accuracy:", accuracy)

# find the two most important features
importances = model.feature_importances_
indices = (-importances).argsort()[:2]
important_features = [features[i] for i in indices]
print("two most important features:", important_features)