AIM-PIbd-31-Kozyrev-S-S/lab_4/lab_4.ipynb

Variant: a list of people.

In [52]:
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score, accuracy_score, f1_score
import numpy as np
import featuretools as ft
from sklearn.metrics import classification_report  # accuracy_score is already imported above

# Function to apply oversampling (duplicates minority-class samples)
def apply_oversampling(X, y):
    oversampler = RandomOverSampler(random_state=42)
    X_resampled, y_resampled = oversampler.fit_resample(X, y)
    return X_resampled, y_resampled

# Function to apply undersampling (discards majority-class samples)
def apply_undersampling(X, y):
    undersampler = RandomUnderSampler(random_state=42)
    X_resampled, y_resampled = undersampler.fit_resample(X, y)
    return X_resampled, y_resampled
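
# Example usage (a sketch; X and y stand for any feature matrix / label series):
# X_bal, y_bal = apply_oversampling(X, y)    # grows minority classes to match the majority
# X_bal, y_bal = apply_undersampling(X, y)   # shrinks majority classes to match the minority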

def split_stratified_into_train_val_test(
    df_input,
    stratify_colname="y",
    frac_train=0.6,
    frac_val=0.15,
    frac_test=0.25,
    random_state=None,
):
    """
    Splits a Pandas dataframe into three subsets (train, val, and test)
    following fractional ratios provided by the user, where each subset is
    stratified by the values in a specific column (that is, each subset has
    the same relative frequency of the values in the column). It performs this
    splitting by running train_test_split() twice.

    Parameters
    ----------
    df_input : Pandas dataframe
        Input dataframe to be split.
    stratify_colname : str
        The name of the column that will be used for stratification. Usually
        this column would be for the label.
    frac_train : float
    frac_val   : float
    frac_test  : float
        The ratios with which the dataframe will be split into train, val, and
        test data. The values should be expressed as float fractions and should
        sum to 1.0.
    random_state : int, None, or RandomStateInstance
        Value to be passed to train_test_split().

    Returns
    -------
    df_train, df_val, df_test :
        Dataframes containing the three splits.
    """

    if abs(frac_train + frac_val + frac_test - 1.0) > 1e-9:  # tolerate float rounding
        raise ValueError(
            "fractions %f, %f, %f do not add up to 1.0"
            % (frac_train, frac_val, frac_test)
        )

    if stratify_colname not in df_input.columns:
        raise ValueError("%s is not a column in the dataframe" % (stratify_colname))

    X = df_input  # contains all columns
    y = df_input[[stratify_colname]]  # dataframe with just the stratification column

    # Split original dataframe into train and temp dataframes.
    df_train, df_temp, y_train, y_temp = train_test_split(
        X, y, stratify=y, test_size=(1.0 - frac_train), random_state=random_state
    )

    # Split the temp dataframe into val and test dataframes.
    relative_frac_test = frac_test / (frac_val + frac_test)
    df_val, df_test, y_val, y_test = train_test_split(
        df_temp,
        y_temp,
        stratify=y_temp,
        test_size=relative_frac_test,
        random_state=random_state,
    )

    assert len(df_input) == len(df_train) + len(df_val) + len(df_test)

    return df_train, df_val, df_test
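
# Example usage (a sketch), a 60/15/25 split stratified by a label column:
# df_train, df_val, df_test = split_stratified_into_train_val_test(
#     df, stratify_colname="Gender", frac_train=0.6, frac_val=0.15,
#     frac_test=0.25, random_state=42,
# )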


df = pd.read_csv("../data/age.csv", nrows=10000)
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 10000 entries, 0 to 9999
Data columns (total 10 columns):
 #   Column             Non-Null Count  Dtype  
---  ------             --------------  -----  
 0   Id                 10000 non-null  object 
 1   Name               10000 non-null  object 
 2   Short description  9996 non-null   object 
 3   Gender             9927 non-null   object 
 4   Country            9721 non-null   object 
 5   Occupation         9836 non-null   object 
 6   Birth year         10000 non-null  int64  
 7   Death year         9999 non-null   float64
 8   Manner of death    1893 non-null   object 
 9   Age of death       9999 non-null   float64
dtypes: float64(2), int64(1), object(7)
memory usage: 781.4+ KB

As business goals we pick the following two options: 1) GameDev: creating a game about a specific character who lived in a specific period in a specific country. 2) Studying how lifespan depends on the country of residence.

Since these are the business goals chosen in the previous lab, we will keep them. Goal 1, however, is problematic: it cannot be framed as a classification task. We replace it with classifying people into age groups, which can be useful, for example, for targeted advertising.

Let's prepare the data.

In [46]:
df.fillna({"Gender": "NaN", "Country": "NaN", "Occupation": "NaN", "Manner of death": "NaN"}, inplace=True)  # keep missing categoricals as an explicit 'NaN' category
df = df.dropna()  # drop rows that still contain missing values (e.g. Death year)
df['Country'] = df['Country'].str.split('; ')  # multi-country cells become lists
df = df.explode('Country')  # one row per (person, country) pair
data = df.copy()

# Drop countries that occur fewer than 100 times
value_counts = data["Country"].value_counts()
rare = value_counts[value_counts < 100].index
data = data[~data["Country"].isin(rare)]

# Keep only the two dominant gender values
data.drop(data[~data['Gender'].isin(['Male', 'Female'])].index, inplace=True)

# One-hot encode the categorical features
data1 = pd.get_dummies(data, columns=['Gender', 'Country', 'Occupation'], drop_first=True)
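
# Quick sanity checks (a sketch of what the steps above should guarantee):
# data["Country"].value_counts().min()   # >= 100 after rare-country removal
# data["Gender"].unique()                # only 'Male' and 'Female' remain
# data1.shape                            # one-hot encoding widens the frame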

Let's estimate the achievable model quality for each task. On this data the quality will be limited: recorded lifespans are only approximate, and an exact prediction is impossible.

Let's choose baselines for our two tasks: 1) regression: the mean age at death; 2) classification: the most frequent age group (see the sketch below).
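
Both baselines are easy to quantify with scikit-learn's dummy estimators. A minimal sketch, assuming the X_train_reg/X_train_class splits created in the cells below:

from sklearn.dummy import DummyRegressor, DummyClassifier

baseline_reg = DummyRegressor(strategy="mean").fit(X_train_reg, y_train_reg)
print("Baseline R2:", baseline_reg.score(X_test_reg, y_test_reg))  # ~0 by construction

baseline_clf = DummyClassifier(strategy="most_frequent").fit(X_train_class, y_train_class)
print("Baseline accuracy:", baseline_clf.score(X_test_class, y_test_class))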

Let's build the pipeline.

In [33]:
print(data.columns)
Index(['Id', 'Name', 'Short description', 'Birth year', 'Death year',
       'Age of death', 'Gender_Male', 'Country_France',
       'Country_German Confederation', 'Country_German Democratic Republic',
       ...
       'Manner of death_euthanasia', 'Manner of death_homicide',
       'Manner of death_homicide; natural causes',
       'Manner of death_internal bleeding', 'Manner of death_natural causes',
       'Manner of death_suicide',
       'Manner of death_suicide; homicide; accident',
       'Manner of death_suicide; unfortunate accident',
       'Manner of death_summary execution', 'Manner of death_unnatural death'],
      dtype='object', length=400)
In [37]:
X_reg = data1.drop(['Id', 'Name', 'Age of death', 'Short description', 'Manner of death'], axis=1)
y_reg = data1['Age of death']

# Train/test split
X_train_reg, X_test_reg, y_train_reg, y_test_reg = train_test_split(X_reg, y_reg, test_size=0.2, random_state=42)

# Candidate regression models
models_reg = {
    'Linear Regression': LinearRegression(),
    'Random Forest Regressor': RandomForestRegressor(random_state=42),
    'Gradient Boosting Regressor': GradientBoostingRegressor(random_state=42)
}

# Build a scaler + model pipeline for each regressor
pipelines_reg = {}
for name, model in models_reg.items():
    pipelines_reg[name] = Pipeline([
        ('scaler', StandardScaler()),
        ('model', model)
    ])

# Hyperparameter grids for regression
param_grids_reg = {
    'Linear Regression': {},
    'Random Forest Regressor': {
        'model__n_estimators': [100, 200, 300],
        'model__max_depth': [None, 10, 20, 30]
    },
    'Gradient Boosting Regressor': {
        'model__n_estimators': [100, 200, 300],
        'model__learning_rate': [0.01, 0.1, 0.2],
        'model__max_depth': [3, 5, 7]
    }
}

# Hyperparameter tuning for regression
best_models_reg = {}
for name, pipeline in pipelines_reg.items():
    grid_search = GridSearchCV(pipeline, param_grids_reg[name], cv=5, scoring='neg_mean_squared_error')
    grid_search.fit(X_train_reg, y_train_reg)
    best_models_reg[name] = grid_search.best_estimator_
    print(f'Best parameters for {name}: {grid_search.best_params_}')

# Re-fit the best models (redundant: GridSearchCV already refits best_estimator_ by default)
for name, model in best_models_reg.items():
    model.fit(X_train_reg, y_train_reg)

# Evaluate the regression models on the test set
for name, model in best_models_reg.items():
    y_pred_reg = model.predict(X_test_reg)
    mse = mean_squared_error(y_test_reg, y_pred_reg)
    r2 = r2_score(y_test_reg, y_pred_reg)
    print(f'{name}: MSE = {mse}, R2 = {r2}')
Best parameters for Linear Regression: {}
Best parameters for Random Forest Regressor: {'model__max_depth': None, 'model__n_estimators': 100}
Best parameters for Gradient Boosting Regressor: {'model__learning_rate': 0.2, 'model__max_depth': 7, 'model__n_estimators': 300}
Linear Regression: MSE = 0.002807184047660083, R2 = 0.9999899555289343
Random Forest Regressor: MSE = 11.46917740409879, R2 = 0.9589617856804076
Gradient Boosting Regressor: MSE = 8.202651735797296, R2 = 0.9706498410424512
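The near-perfect R² of the linear model is a red flag rather than an achievement: Age of death appears to be, up to rounding, simply Death year minus Birth year, and both columns sit in the feature matrix, so the target leaks directly into the features. A minimal sketch of checking this (same split parameters as above):

X_reg_noleak = X_reg.drop(columns=["Death year"])
X_tr, X_te, y_tr, y_te = train_test_split(X_reg_noleak, y_reg, test_size=0.2, random_state=42)
print("R2 without Death year:",
      r2_score(y_te, LinearRegression().fit(X_tr, y_tr).predict(X_te)))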
In [50]:
data2 = data.drop(['Short description', 'Manner of death', 'Gender', 'Country', 'Occupation'], axis=1)
In [54]:
# Create age groups
bins = [0, 18, 30, 50, 70, 100]
labels = ['0-18', '19-30', '31-50', '51-70', '71+']
data['Age Group'] = pd.cut(data['Age of death'], bins=bins, labels=labels)
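# Note: pd.cut bins are right-closed, so '71+' actually covers (70, 100];
# ages of exactly 0 or above 100 fall outside the bins and become NaN.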

# Features and target for classification
X_class = data2.drop(['Id', 'Name', 'Age of death'], axis=1)  # data2 was built before 'Age Group' existed, so there is nothing to drop there
y_class = data['Age Group']
print(X_class.columns)
# Train/test split
X_train_class, X_test_class, y_train_class, y_test_class = train_test_split(X_class, y_class, test_size=0.2, random_state=42)

# Candidate classification models
models_class = {
    'Logistic Regression': LogisticRegression(random_state=42, max_iter=5000, solver='liblinear'),
    'Random Forest Classifier': RandomForestClassifier(random_state=42),
    'Gradient Boosting Classifier': GradientBoostingClassifier(random_state=42)
}

# Build a scaler + model pipeline for each classifier
pipelines_class = {}
for name, model in models_class.items():
    pipelines_class[name] = Pipeline([
        ('scaler', StandardScaler()),
        ('model', model)
    ])

# Hyperparameter grids for classification (full search, disabled below)
'''
param_grids_class = {
    'Logistic Regression': {
        'model__C': [0.1, 1, 10],
        'model__solver': ['lbfgs', 'liblinear']
    },
    'Random Forest Classifier': {
        'model__n_estimators': [100, 200, 300],
        'model__max_depth': [None, 10, 20, 30]
    },
    'Gradient Boosting Classifier': {
        'model__n_estimators': [100, 200, 300],
        'model__learning_rate': [0.01, 0.1, 0.2],
        'model__max_depth': [3, 5, 7]
    }
}'''
# The full grid search above is commented out: it had already been run once,
# but the notebook later crashed, so only the best parameters it found are kept below.

param_grids_class = {
    'Logistic Regression': {
        'model__C': [10],
        'model__solver': ['lbfgs']
    },
    'Random Forest Classifier': {
        'model__n_estimators': [200],
        'model__max_depth': [30]
    },
    'Gradient Boosting Classifier': {
        'model__n_estimators': [200],
        'model__learning_rate': [0.1],
        'model__max_depth': [7]
    }
}

# Hyperparameter tuning for classification
best_models_class = {}
for name, pipeline in pipelines_class.items():
    grid_search = GridSearchCV(pipeline, param_grids_class[name], cv=5, scoring='accuracy')
    grid_search.fit(X_train_class, y_train_class)
    best_models_class[name] = grid_search.best_estimator_
    print(f'Best parameters for {name}: {grid_search.best_params_}')

# Re-fit the best classifiers (redundant: GridSearchCV already refits best_estimator_ by default)
for name, model in best_models_class.items():
    model.fit(X_train_class, y_train_class)

# Evaluate the classifiers on the test set
for name, model in best_models_class.items():
    y_pred_class = model.predict(X_test_class)
    accuracy = accuracy_score(y_test_class, y_pred_class)
    report = classification_report(y_test_class, y_pred_class)
    print(f'{name}: Accuracy = {accuracy}')
    print(f'Classification Report:\n{report}')
Index(['Birth year', 'Death year'], dtype='object')
Best parameters for Logistic Regression: {'model__C': 10, 'model__solver': 'lbfgs'}
Best parameters for Random Forest Classifier: {'model__max_depth': 30, 'model__n_estimators': 200}
Best parameters for Gradient Boosting Classifier: {'model__learning_rate': 0.1, 'model__max_depth': 7, 'model__n_estimators': 200}
c:\Users\89176\sourse\MII\Labas\AIM-PIbd-31-Kozyrev-S-S\aimvenv\Lib\site-packages\sklearn\metrics\_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\89176\sourse\MII\Labas\AIM-PIbd-31-Kozyrev-S-S\aimvenv\Lib\site-packages\sklearn\metrics\_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
c:\Users\89176\sourse\MII\Labas\AIM-PIbd-31-Kozyrev-S-S\aimvenv\Lib\site-packages\sklearn\metrics\_classification.py:1531: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.
  _warn_prf(average, modifier, f"{metric.capitalize()} is", len(result))
Logistic Regression: Accuracy = 0.9248554913294798
Classification Report:
              precision    recall  f1-score   support

        0-18       0.00      0.00      0.00         5
       19-30       0.50      0.08      0.14        60
       31-50       0.77      0.77      0.77       242
       51-70       0.91      0.96      0.94       650
         71+       0.98      1.00      0.99       946

    accuracy                           0.92      1903
   macro avg       0.63      0.56      0.57      1903
weighted avg       0.91      0.92      0.91      1903

Random Forest Classifier: Accuracy = 0.9485023646873357
Classification Report:
              precision    recall  f1-score   support

        0-18       0.67      0.40      0.50         5
       19-30       0.96      0.77      0.85        60
       31-50       0.88      0.89      0.88       242
       51-70       0.92      0.95      0.94       650
         71+       0.99      0.97      0.98       946

    accuracy                           0.95      1903
   macro avg       0.88      0.80      0.83      1903
weighted avg       0.95      0.95      0.95      1903

Gradient Boosting Classifier: Accuracy = 0.9379926431949553
Classification Report:
              precision    recall  f1-score   support

        0-18       1.00      0.40      0.57         5
       19-30       0.96      0.77      0.85        60
       31-50       0.87      0.87      0.87       242
       51-70       0.90      0.95      0.92       650
         71+       0.98      0.96      0.97       946

    accuracy                           0.94      1903
   macro avg       0.94      0.79      0.84      1903
weighted avg       0.94      0.94      0.94      1903
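
The UndefinedMetricWarning messages above come from classes that never appear among the predictions (e.g. the tiny 0-18 group under logistic regression), making precision 0/0. The zero_division parameter of classification_report controls how that case is reported; a minimal sketch:

# Report precision as 0.0 (and silence the warning) for classes that are never predicted
report = classification_report(y_test_class, y_pred_class, zero_division=0)
print(report)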