This is page 4 of 16. Use http://codebase.md/mljar/mljar-supervised?lines=false&page={x} to view the full context.

# Directory Structure

```
├── .github
│   └── workflows
│       ├── run-tests.yml
│       ├── test-installation-with-conda.yml
│       └── test-installation-with-pip-on-windows.yml
├── .gitignore
├── CITATION
├── examples
│   ├── notebooks
│   │   ├── basic_run.ipynb
│   │   └── Titanic.ipynb
│   └── scripts
│       ├── binary_classifier_adult_fairness.py
│       ├── binary_classifier_ensemble.py
│       ├── binary_classifier_marketing.py
│       ├── binary_classifier_random.py
│       ├── binary_classifier_Titanic.py
│       ├── binary_classifier.py
│       ├── multi_class_classifier_digits.py
│       ├── multi_class_classifier_MNIST.py
│       ├── multi_class_classifier.py
│       ├── multi_class_drug_fairness.py
│       ├── regression_acs_fairness.py
│       ├── regression_crime_fairness.py
│       ├── regression_housing_fairness.py
│       ├── regression_law_school_fairness.py
│       ├── regression.py
│       └── tabular_mar_2021.py
├── LICENSE
├── MANIFEST.in
├── pytest.ini
├── README.md
├── requirements_dev.txt
├── requirements.txt
├── setup.py
├── supervised
│   ├── __init__.py
│   ├── algorithms
│   │   ├── __init__.py
│   │   ├── algorithm.py
│   │   ├── baseline.py
│   │   ├── catboost.py
│   │   ├── decision_tree.py
│   │   ├── extra_trees.py
│   │   ├── factory.py
│   │   ├── knn.py
│   │   ├── lightgbm.py
│   │   ├── linear.py
│   │   ├── nn.py
│   │   ├── random_forest.py
│   │   ├── registry.py
│   │   ├── sklearn.py
│   │   └── xgboost.py
│   ├── automl.py
│   ├── base_automl.py
│   ├── callbacks
│   │   ├── __init__.py
│   │   ├── callback_list.py
│   │   ├── callback.py
│   │   ├── early_stopping.py
│   │   ├── learner_time_constraint.py
│   │   ├── max_iters_constraint.py
│   │   ├── metric_logger.py
│   │   ├── terminate_on_nan.py
│   │   └── total_time_constraint.py
│   ├── ensemble.py
│   ├── exceptions.py
│   ├── fairness
│   │   ├── __init__.py
│   │   ├── metrics.py
│   │   ├── optimization.py
│   │   ├── plots.py
│   │   ├── report.py
│   │   └── utils.py
│   ├── model_framework.py
│   ├── preprocessing
│   │   ├── __init__.py
│   │   ├── datetime_transformer.py
│   │   ├── encoding_selector.py
│   │   ├── exclude_missing_target.py
│   │   ├── goldenfeatures_transformer.py
│   │   ├── kmeans_transformer.py
│   │   ├── label_binarizer.py
│   │   ├── label_encoder.py
│   │   ├── preprocessing_categorical.py
│   │   ├── preprocessing_missing.py
│   │   ├── preprocessing_utils.py
│   │   ├── preprocessing.py
│   │   ├── scale.py
│   │   └── text_transformer.py
│   ├── tuner
│   │   ├── __init__.py
│   │   ├── data_info.py
│   │   ├── hill_climbing.py
│   │   ├── mljar_tuner.py
│   │   ├── optuna
│   │   │   ├── __init__.py
│   │   │   ├── catboost.py
│   │   │   ├── extra_trees.py
│   │   │   ├── knn.py
│   │   │   ├── lightgbm.py
│   │   │   ├── nn.py
│   │   │   ├── random_forest.py
│   │   │   ├── tuner.py
│   │   │   └── xgboost.py
│   │   ├── preprocessing_tuner.py
│   │   ├── random_parameters.py
│   │   └── time_controller.py
│   ├── utils
│   │   ├── __init__.py
│   │   ├── additional_metrics.py
│   │   ├── additional_plots.py
│   │   ├── automl_plots.py
│   │   ├── common.py
│   │   ├── config.py
│   │   ├── constants.py
│   │   ├── data_validation.py
│   │   ├── importance.py
│   │   ├── jsonencoder.py
│   │   ├── leaderboard_plots.py
│   │   ├── learning_curves.py
│   │   ├── metric.py
│   │   ├── shap.py
│   │   ├── subsample.py
│   │   └── utils.py
│   └── validation
│       ├── __init__.py
│       ├── validation_step.py
│       ├── validator_base.py
│       ├── validator_custom.py
│       ├── validator_kfold.py
│       └── validator_split.py
└── tests
    ├── __init__.py
    ├── checks
    │   ├── __init__.py
    │   ├── check_automl_with_regression.py
    │   ├── run_ml_tests.py
    │   └── run_performance_tests.py
    ├── conftest.py
    ├── data
    │   ├── 179.csv
    │   ├── 24.csv
    │   ├── 3.csv
    │   ├── 31.csv
    │   ├── 38.csv
    │   ├── 44.csv
    │   ├── 720.csv
    │   ├── 737.csv
    │   ├── acs_income_1k.csv
    │   ├── adult_missing_values_missing_target_500rows.csv
    │   ├── boston_housing.csv
    │   ├── CrimeData
    │   │   ├── cities.json
    │   │   ├── crimedata.csv
    │   │   └── README.md
    │   ├── Drug
    │   │   ├── Drug_Consumption.csv
    │   │   └── README.md
    │   ├── housing_regression_missing_values_missing_target.csv
    │   ├── iris_classes_missing_values_missing_target.csv
    │   ├── iris_missing_values_missing_target.csv
    │   ├── LawSchool
    │   │   ├── bar_pass_prediction.csv
    │   │   └── README.md
    │   ├── PortugeseBankMarketing
    │   │   └── Data_FinalProject.csv
    │   └── Titanic
    │       ├── test_with_Survived.csv
    │       └── train.csv
    ├── README.md
    ├── tests_algorithms
    │   ├── __init__.py
    │   ├── test_baseline.py
    │   ├── test_catboost.py
    │   ├── test_decision_tree.py
    │   ├── test_extra_trees.py
    │   ├── test_factory.py
    │   ├── test_knn.py
    │   ├── test_lightgbm.py
    │   ├── test_linear.py
    │   ├── test_nn.py
    │   ├── test_random_forest.py
    │   ├── test_registry.py
    │   └── test_xgboost.py
    ├── tests_automl
    │   ├── __init__.py
    │   ├── test_adjust_validation.py
    │   ├── test_automl_init.py
    │   ├── test_automl_report.py
    │   ├── test_automl_sample_weight.py
    │   ├── test_automl_time_constraints.py
    │   ├── test_automl.py
    │   ├── test_data_types.py
    │   ├── test_dir_change.py
    │   ├── test_explain_levels.py
    │   ├── test_golden_features.py
    │   ├── test_handle_imbalance.py
    │   ├── test_integration.py
    │   ├── test_joblib_version.py
    │   ├── test_models_needed_for_predict.py
    │   ├── test_prediction_after_load.py
    │   ├── test_repeated_validation.py
    │   ├── test_restore.py
    │   ├── test_stack_models_constraints.py
    │   ├── test_targets.py
    │   └── test_update_errors_report.py
    ├── tests_callbacks
    │   ├── __init__.py
    │   └── test_total_time_constraint.py
    ├── tests_ensemble
    │   ├── __init__.py
    │   └── test_save_load.py
    ├── tests_fairness
    │   ├── __init__.py
    │   ├── test_binary_classification.py
    │   ├── test_multi_class_classification.py
    │   └── test_regression.py
    ├── tests_preprocessing
    │   ├── __init__.py
    │   ├── disable_eda.py
    │   ├── test_categorical_integers.py
    │   ├── test_datetime_transformer.py
    │   ├── test_encoding_selector.py
    │   ├── test_exclude_missing.py
    │   ├── test_goldenfeatures_transformer.py
    │   ├── test_label_binarizer.py
    │   ├── test_label_encoder.py
    │   ├── test_preprocessing_missing.py
    │   ├── test_preprocessing_utils.py
    │   ├── test_preprocessing.py
    │   ├── test_scale.py
    │   └── test_text_transformer.py
    ├── tests_tuner
    │   ├── __init__.py
    │   ├── test_hill_climbing.py
    │   ├── test_time_controller.py
    │   └── test_tuner.py
    ├── tests_utils
    │   ├── __init__.py
    │   ├── test_compute_additional_metrics.py
    │   ├── test_importance.py
    │   ├── test_learning_curves.py
    │   ├── test_metric.py
    │   ├── test_shap.py
    │   └── test_subsample.py
    └── tests_validation
        ├── __init__.py
        ├── test_validator_kfold.py
        └── test_validator_split.py
```

# Files

--------------------------------------------------------------------------------
/tests/tests_preprocessing/test_preprocessing.py:
--------------------------------------------------------------------------------

```python
import unittest

import numpy as np
import pandas as pd

from supervised.preprocessing.preprocessing import Preprocessing
from supervised.preprocessing.preprocessing_categorical import PreprocessingCategorical
from supervised.preprocessing.preprocessing_missing import PreprocessingMissingValues


class PreprocessingTest(unittest.TestCase):
    def test_constructor_preprocessing_step(self):
        preprocessing_params = {}
        ps = Preprocessing(preprocessing_params)

        self.assertTrue(len(ps._missing_values) == 0)
        self.assertTrue(len(ps._categorical) == 0)
        self.assertTrue(ps._categorical_y is None)

    def test_exclude_missing_targets_all_good(self):
        # training data
        d = {
            "col1": [1, 1, 1, 3],
            "col2": [5, 6, 7, 0],
            "col3": [1, 1, 1, 3],
            "col4": [2, 2, 4, 3],
            "y": [0, 1, 0, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        ps = Preprocessing()
        X_train, y_train = ps._exclude_missing_targets(X_train, y_train)

        self.assertEqual(4, X_train.shape[0])
        self.assertEqual(4, y_train.shape[0])

    def test_exclude_missing_targets(self):
        # training data
        d = {
            "col1": [1, 1, 1, 3],
            "col2": [5, 6, 7, 0],
            "col3": [1, 1, 1, 3],
            "col4": [2, 2, 4, 3],
            "y": [0, np.nan, 0, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        ps = Preprocessing()
        X_train, y_train = ps._exclude_missing_targets(X_train, y_train)

        self.assertEqual(3, X_train.shape[0])
        self.assertEqual(3, y_train.shape[0])

    def test_run_exclude_missing_targets(self):
        # training data
        d = {
            "col1": [1, 1, 1, 3],
            "col2": [5, 6, 7, 0],
            "col3": [1, 1, 1, 3],
            "col4": [2, 2, 4, 3],
            "y": [0, np.nan, 0, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        ps = Preprocessing()
        X_train, y_train, _ = ps.fit_and_transform(X_train, y_train)
        self.assertEqual(3, X_train.shape[0])
        self.assertEqual(3, y_train.shape[0])

    def test_run_all_good(self):
        # training data
        d = {
            "col1": [1, 1, 1, 3],
            "col2": [5, 6, 7, 0],
            "col3": [1, 1, 1, 3],
            "col4": [2, 2, 4, 3],
            "y": [0, 1, 0, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        preprocessing_params = {
            "columns_preprocessing": {
                "col1": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
                "col2": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
                "col3": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
                "col4": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
            }
        }

        ps = Preprocessing(preprocessing_params)

        X_train, y_train, _ = ps.fit_and_transform(X_train, y_train)

        for col in ["col1", "col2", "col3", "col4"]:
            self.assertTrue(col in X_train.columns)

        params_json = ps.to_json()
        self.assertEqual(len(params_json), 1)  # should store params only
        self.assertTrue("params" in params_json)

    def test_run_fill_median_convert_integer(self):
        # training data
        d = {
            "col1": [1, 1, np.nan, 3],
            "col2": ["a", "a", np.nan, "a"],
            "col3": [1, 1, 1, 3],
            "col4": ["a", "a", "b", "c"],
            "y": [0, 1, 0, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        preprocessing_params = {
            "columns_preprocessing": {
                "col1": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
                "col2": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
                "col3": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
                "col4": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
            }
        }

        ps = Preprocessing(preprocessing_params)
        X_train, y_train, _ = ps.fit_and_transform(X_train, y_train)

        for col in ["col1", "col2", "col3", "col4"]:
            self.assertTrue(col in X_train.columns)
        self.assertEqual(X_train["col1"][2], 1)
        self.assertEqual(X_train["col2"][2], 0)
        self.assertEqual(X_train["col4"][0], 0)
        self.assertEqual(X_train["col4"][1], 0)
        self.assertEqual(X_train["col4"][2], 1)
        self.assertEqual(X_train["col4"][3], 2)

        params_json = ps.to_json()

        self.assertTrue("missing_values" in params_json)
        self.assertTrue("categorical" in params_json)
        self.assertTrue("categorical_y" not in params_json)

        self.assertTrue("fill_params" in params_json["missing_values"][0])
        self.assertEqual(
            "na_fill_median", params_json["missing_values"][0]["fill_method"]
        )
        self.assertTrue("convert_params" in params_json["categorical"][0])
        self.assertEqual(
            "categorical_to_int", params_json["categorical"][0]["convert_method"]
        )

    def test_run_fill_median_convert_integer_validation_dataset(self):
        # training data
        d = {
            "col1": [1, 1, np.nan, 3],
            "col2": ["a", "a", np.nan, "a"],
            "col3": [1, 1, 1, 3],
            "col4": ["a", "a", "b", "c"],
            "y": [0, 1, 1, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        d_test = {
            "col1": [1, 1, np.nan, 3],
            "col2": ["a", "a", np.nan, "a"],
            "col3": [1, 1, 1, 3],
            "col4": ["a", "a", "b", "c"],
            "y": [np.nan, 1, np.nan, 1],
        }
        df_test = pd.DataFrame(data=d_test)
        X_test = df_test.loc[:, ["col1", "col2", "col3", "col4"]]
        y_test = df_test.loc[:, "y"]

        preprocessing_params = {
            "columns_preprocessing": {
                "col1": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
                "col2": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
                "col3": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
                "col4": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
            }
        }

        ps = Preprocessing(preprocessing_params)

        X_train, y_train, _ = ps.fit_and_transform(X_train, y_train)
        X_test, y_test, _ = ps.transform(X_test, y_test)

        for col in ["col1", "col2", "col3", "col4"]:
            self.assertTrue(col in X_train.columns)
            self.assertTrue(col in X_test.columns)

        self.assertEqual(4, X_train.shape[0])
        self.assertEqual(4, y_train.shape[0])
        self.assertEqual(2, X_test.shape[0])
        self.assertEqual(2, y_test.shape[0])

    def test_run_on_y_only(self):
        d = {"y": ["a", "b", "a", "b"]}
        df = pd.DataFrame(data=d)
        y_train = df.loc[:, "y"]

        preprocessing_params = {
            "target_preprocessing": [
                PreprocessingMissingValues.FILL_NA_MEDIAN,
                PreprocessingCategorical.CONVERT_INTEGER,
            ]
        }

        ps = Preprocessing(preprocessing_params)
        _, y_train, _ = ps.fit_and_transform(None, y_train)

        self.assertEqual(4, y_train.shape[0])
        self.assertEqual(0, y_train[0])
        self.assertEqual(1, y_train[1])

    def test_run_on_y_only_validation(self):
        d = {"y": ["a", "b", "a", "b"]}
        df = pd.DataFrame(data=d)
        y_train = df.loc[:, "y"]

        d_test = {"y": [np.nan, "a", np.nan, "b"]}
        df_test = pd.DataFrame(data=d_test)
        y_test = df_test.loc[:, "y"]

        preprocessing_params = {
            "target_preprocessing": [
                PreprocessingMissingValues.FILL_NA_MEDIAN,
                PreprocessingCategorical.CONVERT_INTEGER,
            ]
        }

        ps = Preprocessing(preprocessing_params)

        _, y_train, _ = ps.fit_and_transform(None, y_train)
        _, y_test, _ = ps.transform(None, y_test)

        self.assertEqual(4, y_train.shape[0])
        self.assertEqual(2, y_test.shape[0])
        self.assertEqual(0, y_train[0])
        self.assertEqual(1, y_train[1])
        self.assertEqual(0, y_test[0])
        self.assertEqual(1, y_test[1])

    def test_to_and_from_json_run_fill_median_convert_integer(self):
        # training data
        d = {
            "col1": [1, 1, np.nan, 3],
            "col2": ["a", "a", np.nan, "a"],
            "col3": [1, 1, 1, 3],
            "col4": ["a", "a", "b", "c"],
            "y": [0, 1, 0, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        preprocessing_params = {
            "columns_preprocessing": {
                "col1": [PreprocessingMissingValues.FILL_NA_MEDIAN],
                "col2": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
                "col4": [
                    PreprocessingMissingValues.FILL_NA_MEDIAN,
                    PreprocessingCategorical.CONVERT_INTEGER,
                ],
            },
            "target_preprocessing": [],
        }

        ps = Preprocessing(preprocessing_params)
        _, _, _ = ps.fit_and_transform(X_train, y_train)

        ps2 = Preprocessing()
        ps2.from_json(ps.to_json(), "./")
        del ps

        d_test = {
            "col1": [1, 1, np.nan, 3],
            "col2": ["a", "a", np.nan, "a"],
            "col3": [1, 1, 1, 3],
            "col4": ["a", "a", "b", "c"],
            "y": [np.nan, np.nan, 1, 1],
        }
        df_test = pd.DataFrame(data=d_test)
        X_test = df_test.loc[:, ["col1", "col2", "col3", "col4"]]
        y_test = df_test.loc[:, "y"]

        X_test, y_test, _ = ps2.transform(X_test, y_test)

        self.assertEqual(2, y_test.shape[0])
        self.assertEqual(2, np.sum(y_test))
        self.assertEqual(1, X_test["col1"].iloc[0])
        self.assertEqual(0, X_test["col2"].iloc[0])

    def test_empty_column(self):
        # training data
        d = {
            "col1": [np.nan, np.nan, np.nan, np.nan],
            "col2": [5, 6, 7, 0],
            "col3": [1, 1, 1, 3],
            "col4": [2, 2, 4, 3],
            "y": [0, 1, 0, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        preprocessing_params = {"columns_preprocessing": {"col1": ["remove_column"]}}

        ps = Preprocessing(preprocessing_params)
        X_train1, _, _ = ps.fit_and_transform(X_train, y_train)

        self.assertTrue("col1" not in X_train1.columns)
        self.assertEqual(3, len(X_train1.columns))
        X_train2, _, _ = ps.transform(X_train, y_train)
        self.assertTrue("col1" not in X_train2.columns)
        self.assertEqual(3, len(X_train2.columns))
        for col in ["col2", "col3", "col4"]:
            self.assertTrue(col in X_train2.columns)

        params_json = ps.to_json()
        ps2 = Preprocessing()
        ps2.from_json(params_json, "./")

        X_train3, _, _ = ps2.transform(X_train, y_train)
        self.assertTrue("col1" not in X_train3.columns)
        self.assertEqual(3, len(X_train3.columns))
        for col in ["col2", "col3", "col4"]:
            self.assertTrue(col in X_train3.columns)


"""
    def test_run_fill_median_convert_one_hot_validation_dataset(self):
        # training data
        d = {
            "col1": [1, 1, np.nan, 3],
            "col2": ["a", "a", np.nan, "a"],
            "col3": [1, 1, 1, 3],
            "col4": ["a", "a", "b", "c"],
            "y": [0, 1, 1, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        d_test = {
            "col1": [1, 1, np.nan, 3],
            "col2": ["a", "z", np.nan, "a"],
            "col3": [1, 1, 1, 3],
            "col4": ["a", "a", "b", "c"],
            "y": [np.nan, 1, np.nan, 1],
        }
        df_test = pd.DataFrame(data=d_test)
        X_test = df_test.loc[:, ["col1", "col2", "col3", "col4"]]
        y_test = df_test.loc[:, "y"]

        ps = Preprocessing(
            missing_values_method=PreprocessingMissingValues.FILL_NA_MEDIAN,
            categorical_method=PreprocessingCategorical.CONVERT_ONE_HOT,
        )
        X_train, y_train, X_test, y_test = ps.run(
            X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test
        )

        for col in ["col1", "col2_a", "col3", "col4_a", "col4_b", "col4_c"]:
            self.assertTrue(col in X_train.columns)
            self.assertTrue(col in X_test.columns)

        self.assertEqual(4, X_train.shape[0])
        self.assertEqual(2, X_test.shape[0])
        self.assertEqual(4, np.sum(X_train["col2_a"]))
        self.assertEqual(2, np.sum(X_train["col4_a"]))
        self.assertEqual(1, np.sum(X_train["col4_b"]))
        self.assertEqual(1, np.sum(X_train["col4_c"]))
        self.assertEqual(0, X_test.loc[0, "col2_a"])
        self.assertEqual(1, X_test.loc[1, "col2_a"])

    def test_run_fill_median_convert_one_hot_big_categorical(self):

        a_lot = 250
        cs = []
        for i in range(a_lot):
            cs.append(str(uuid.uuid4().hex.upper()[0:6]))

        d = {
            "col1": cs,
            "col2": ["a", "b"] * int(a_lot / 2),
            "col3": range(a_lot),
            "col4": range(a_lot),
        }

        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        X_train_2 = copy.deepcopy(X_train)

        ps = Preprocessing(
            missing_values_method=PreprocessingMissingValues.FILL_NA_MEDIAN,
            categorical_method=PreprocessingCategorical.CONVERT_ONE_HOT,
        )
        X_train, _, _, _ = ps.run(X_train=X_train)

        for col in ["col1", "col2_b", "col3", "col4"]:
            self.assertTrue(col in X_train.columns)

        self.assertTrue(
            np.max(X_train["col1"]) > 0.90 * a_lot
        )  # there can be duplicates ;)
        self.assertEqual(np.max(X_train["col2_b"]), 1)
        self.assertEqual(np.sum(X_train["col2_b"]), a_lot / 2)

        ps2 = Preprocessing()
        ps2.from_json(ps.to_json())
        del ps
        # apply preprocessing loaded from json
        _, _, X_train_2, _ = ps2.run(X_test=X_train_2)
        for col in ["col1", "col2_b", "col3", "col4"]:
            self.assertTrue(col in X_train_2.columns)

        self.assertTrue(
            np.max(X_train_2["col1"]) > 0.90 * a_lot
        )  # there can be duplicates ;)
        self.assertEqual(np.max(X_train_2["col2_b"]), 1)
        self.assertEqual(np.sum(X_train_2["col2_b"]), a_lot / 2)

    def test_convert_target(self):
        d = {
            "col1": [1, 1, np.nan, 3],
            "col2": ["a", "a", np.nan, "a"],
            "col3": [1, 1, 1, 3],
            "col4": ["a", "a", "b", "c"],
            "y": [2, 2, 1, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        ps = Preprocessing(
            missing_values_method=PreprocessingMissingValues.FILL_NA_MEDIAN,
            categorical_method=PreprocessingCategorical.CONVERT_ONE_HOT,
            project_task="PROJECT_BIN_CLASS",
        )
        X_train, y_train, _, _ = ps.run(X_train=X_train, y_train=y_train)

        self.assertEqual(2, len(np.unique(y_train)))
        self.assertTrue(0 in np.unique(y_train))
        self.assertTrue(1 in np.unique(y_train))

    def test_dont_convert_target(self):
        d = {
            "col1": [1, 1, np.nan, 3],
            "col2": ["a", "a", np.nan, "a"],
            "col3": [1, 1, 1, 3],
            "col4": ["a", "a", "b", "c"],
            "y": [2, 2, 1, 1],
        }
        df = pd.DataFrame(data=d)
        X_train = df.loc[:, ["col1", "col2", "col3", "col4"]]
        y_train = df.loc[:, "y"]

        ps = Preprocessing(
            missing_values_method=PreprocessingMissingValues.FILL_NA_MEDIAN,
            categorical_method=PreprocessingCategorical.CONVERT_ONE_HOT,
            project_task="PROJECT_REGRESSION",
        )
        X_train, y_train, _, _ = ps.run(X_train=X_train, y_train=y_train)

        self.assertEqual(2, len(np.unique(y_train)))
        self.assertTrue(1 in np.unique(y_train))
        self.assertTrue(2 in np.unique(y_train))
"""

if __name__ == "__main__":
    unittest.main()

```
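
The tests above pin down the `Preprocessing` contract: per-column pipelines configured via `columns_preprocessing`, rows with missing targets dropped inside `fit_and_transform`, and a JSON round-trip that lets `transform` replay the fit on new data. As a quick orientation, here is a minimal sketch based only on the API usage shown in these tests (the column name and data are made up):

```python
import numpy as np
import pandas as pd

from supervised.preprocessing.preprocessing import Preprocessing
from supervised.preprocessing.preprocessing_categorical import PreprocessingCategorical
from supervised.preprocessing.preprocessing_missing import PreprocessingMissingValues

# Each column gets a pipeline: fill missing values first, then encode categoricals.
params = {
    "columns_preprocessing": {
        "feature": [
            PreprocessingMissingValues.FILL_NA_MEDIAN,
            PreprocessingCategorical.CONVERT_INTEGER,
        ]
    }
}

X = pd.DataFrame({"feature": ["a", "a", np.nan, "b"]})
y = pd.Series([0, 1, 0, 1], name="y")

ps = Preprocessing(params)
X_t, y_t, _ = ps.fit_and_transform(X, y)

# The fitted state serializes to JSON, so a fresh instance can replay the
# same transform at prediction time (see test_to_and_from_json_* above).
ps2 = Preprocessing()
ps2.from_json(ps.to_json(), "./")
X_new, _, _ = ps2.transform(X, y)
```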

--------------------------------------------------------------------------------
/supervised/ensemble.py:
--------------------------------------------------------------------------------

```python
import copy
import json
import logging
import os
import time
import uuid

import numpy as np
import pandas as pd

from supervised.algorithms.registry import (
    BINARY_CLASSIFICATION,
    MULTICLASS_CLASSIFICATION,
    REGRESSION,
)
from supervised.exceptions import NotTrainedException
from supervised.model_framework import ModelFramework
from supervised.utils.additional_metrics import AdditionalMetrics
from supervised.utils.config import LOG_LEVEL
from supervised.utils.jsonencoder import MLJSONEncoder
from supervised.utils.metric import Metric

logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)

from tabulate import tabulate

from supervised.utils.learning_curves import LearningCurves


class Ensemble:
    algorithm_name = "Greedy Ensemble"
    algorithm_short_name = "Ensemble"

    def __init__(
        self,
        optimize_metric="logloss",
        ml_task=BINARY_CLASSIFICATION,
        is_stacked=False,
        max_single_prediction_time=None,
        fairness_metric=None,
        fairness_threshold=None,
        privileged_groups=None,
        underprivileged_groups=None,
    ):
        self.library_version = "0.1"
        self.uid = str(uuid.uuid4())

        self.metric = Metric({"name": optimize_metric})
        self.best_loss = self.metric.get_maximum()  # the best loss obtained by ensemble
        self.models_map = None
        self.selected_models = []
        self.train_time = None
        self.total_best_sum = None  # total sum of predictions, i.e. the ensemble's OOF predictions
        self.target = None
        self.target_columns = None
        self.sample_weight = None
        self._ml_task = ml_task
        self._optimize_metric = optimize_metric
        self._is_stacked = is_stacked

        self._additional_metrics = None
        self._threshold = None
        self._name = "Ensemble_Stacked" if is_stacked else "Ensemble"
        self._scores = []
        self.oof_predictions = None
        self._oof_predictions_fname = None
        self._single_prediction_time = None  # prediction time on a single sample
        self._max_single_prediction_time = max_single_prediction_time
        self.model_prediction_time = {}

        self._fairness_metric = fairness_metric
        self._fairness_threshold = fairness_threshold
        self._privileged_groups = privileged_groups
        self._underprivileged_groups = underprivileged_groups
        self._is_fair = None
        self.sensitive_features = None

    def get_train_time(self):
        return self.train_time

    def get_final_loss(self):
        return self.best_loss

    def is_valid(self):
        return len(self.selected_models) > 1

    def is_fast_enough(self, max_single_prediction_time):
        # don't need to check
        if max_single_prediction_time is None:
            return True

        # no information about prediction time
        if self._single_prediction_time is None:
            return True

        return self._single_prediction_time < max_single_prediction_time

    def get_type(self):
        prefix = ""  # "Stacked" if self._is_stacked else ""
        return prefix + self.algorithm_short_name

    def get_name(self):
        return self._name

    def involved_model_names(self):
        """Returns the list of all models involved in the current model.
        For single model, it returns the list with the name of the model.
        For ensemble model, it returns the list with the name of the ensemble and all internal models
        (used to build ensemble).
        For single model but trained on stacked data, it returns the list with the name of the model
        (names of models used in stacking are not included)."""
        if self.selected_models is None or not self.selected_models:
            return [self._name]
        l = []
        for m in self.selected_models:
            l += m["model"].involved_model_names()
        return [self._name] + l

    def get_metric_name(self):
        return self.metric.name

    def get_metric(self):
        return self.metric

    def get_out_of_folds(self):
        """Needed when ensemble is treated as model and we want to compute additional metrics for it"""
        # single prediction (in case of binary classification and regression)
        if self.oof_predictions is not None:
            return self.oof_predictions.copy(deep=True)

        if self._oof_predictions_fname is not None:
            self.oof_predictions = pd.read_csv(self._oof_predictions_fname)
            return self.oof_predictions.copy(deep=True)

        ensemble_oof = pd.DataFrame(
            data=self.total_best_sum, columns=self.total_best_sum.columns
        )
        ensemble_oof["target"] = self.target
        if self.sample_weight is not None:
            ensemble_oof["sample_weight"] = self.sample_weight

        # if self.sensitive_features is not None:
        #    for col in self.sensitive_features.columns:
        #        ensemble_oof[col] = self.sensitive_features[col]

        self.oof_predictions = ensemble_oof
        return ensemble_oof

    def _get_mean(self, oof_selected, best_sum, best_count):
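        # Candidate ensemble mean: `best_sum` is the sum of the `best_count - 1`
        # already-selected OOF predictions (None when nothing is selected yet),
        # so adding `oof_selected` and dividing by `best_count` gives the mean
        # the ensemble would have if this model were picked next.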
        resp = copy.deepcopy(oof_selected)
        if best_count > 1:
            resp += best_sum
            resp /= float(best_count)
        return resp

    def get_oof_matrix(self, models):
        # remember models, will be needed in predictions
        self.models_map = {m.get_name(): m for m in models}

        if self._max_single_prediction_time is not None:
            self.model_prediction_time = {
                m.get_name(): m._single_prediction_time for m in models
            }

            if not [
                m for m in models if m.is_fast_enough(self._max_single_prediction_time)
            ]:
                raise NotTrainedException(
                    "Can't contruct ensemble with prediction time smaller than limit."
                )

        # check if we can construct fair ensemble
        if self._fairness_metric is not None:
            if not [m for m in models if m.is_fair()]:
                raise NotTrainedException("Can't contruct fair ensemble.")

        oofs = {}
        sensitive_features = None
        for m in models:
            # do not use model with RandomFeature
            if "RandomFeature" in m.get_name():
                continue

            # ensemble only the same level of stack
            # if m._is_stacked != self._is_stacked:
            #    continue
            oof = m.get_out_of_folds()
            prediction_cols = [c for c in oof.columns if "prediction" in c]
            oofs[m.get_name()] = oof[prediction_cols]  # oof["prediction"]
            if self.target is None:
                self.target_columns = [c for c in oof.columns if "target" in c]
                self.target = oof[
                    self.target_columns
                ]  # it will be needed for computing advanced model statistics

            if self.sample_weight is None and "sample_weight" in oof.columns:
                self.sample_weight = oof["sample_weight"]

            sensitive_cols = [c for c in oof.columns if "sensitive" in c]
            if sensitive_cols and sensitive_features is None:
                sensitive_features = oof[sensitive_cols]

        return oofs, self.target, self.sample_weight, sensitive_features

    def get_additional_metrics(self):
        if self._additional_metrics is None:
            logger.debug("Get additional metrics for Ensemble")
            # 'target' - the target after processing used for model training
            # 'prediction' - out of folds predictions of the model
            oof_predictions = self.get_out_of_folds()
            prediction_cols = [c for c in oof_predictions.columns if "prediction" in c]
            target_cols = [c for c in oof_predictions.columns if "target" in c]

            oof_preds = oof_predictions[prediction_cols]
            if self._ml_task == MULTICLASS_CLASSIFICATION:
                cols = oof_preds.columns.tolist()
                # strip the "prediction_" prefix (11 chars) to recover class labels
                labels = {i: v[11:] for i, v in enumerate(cols)}

                oof_preds["label"] = np.argmax(
                    np.array(oof_preds[prediction_cols]), axis=1
                )
                oof_preds["label"] = oof_preds["label"].map(labels)

            sample_weight = None
            if "sample_weight" in oof_predictions.columns:
                sample_weight = oof_predictions["sample_weight"]

            self._additional_metrics = AdditionalMetrics.compute(
                oof_predictions[target_cols],
                oof_preds,
                sample_weight,
                self._ml_task,
                self.sensitive_features,
                self._fairness_metric
                if self._ml_task != REGRESSION
                else f"{self._fairness_metric}@{self.get_metric_name()}",
                self._fairness_threshold,
                self._privileged_groups,
                self._underprivileged_groups,
            )
            if self._ml_task == BINARY_CLASSIFICATION:
                self._threshold = float(self._additional_metrics["threshold"])

        return self._additional_metrics

    def get_sensitive_features_names(self):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return [i for i in list(fm.keys()) if i != "fairness_optimization"]

    def get_fairness_metric(self, col_name):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return fm.get(col_name, {}).get("fairness_metric_value")

    def get_fairness_optimization(self):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return fm.get("fairness_optimization", {})

    def get_worst_fairness(self):
        # We have fairness metrics per sensitive feature.
        # The worst fairness metric is:
        # - for ratio metrics, the lowest fairness value from all sensitive features
        # - for difference metrics, the highest fairness value from all sensitive features
        # It is needed as the bias-mitigation stopping criterion.

        metrics = self.get_additional_metrics()

        fm = metrics.get("fairness_metrics", {})
        worst_value = None
        for col_name, values in fm.items():
            if col_name == "fairness_optimization":
                continue
            if "ratio" in self._fairness_metric.lower():
                if worst_value is None:
                    worst_value = values.get("fairness_metric_value", 0)
                else:
                    worst_value = min(
                        worst_value, values.get("fairness_metric_value", 0)
                    )
            else:
                if worst_value is None:
                    worst_value = values.get("fairness_metric_value", 1)
                else:
                    worst_value = max(
                        worst_value, values.get("fairness_metric_value", 1)
                    )

        return worst_value

    def get_best_fairness(self):
        # We have fairness metrics per sensitive feature.
        # The best fairness metric is:
        # - for ratio metrics, the highest fairness value from all sensitive features
        # - for difference metrics, the lowest fairness value from all sensitive features
        # It is needed as the bias-mitigation stopping criterion.

        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        best_value = None
        for col_name, values in fm.items():
            if col_name == "fairness_optimization":
                continue
            if "ratio" in self._fairness_metric.lower():
                if best_value is None:
                    best_value = values.get("fairness_metric_value", 0)
                else:
                    best_value = max(best_value, values.get("fairness_metric_value", 0))
            else:
                if best_value is None:
                    best_value = values.get("fairness_metric_value", 1)
                else:
                    best_value = min(best_value, values.get("fairness_metric_value", 1))

        return best_value

    def is_fair(self):
        if self._is_fair is not None:
            return self._is_fair
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        for col, m in fm.items():
            if col == "fairness_optimization":
                continue
            if not m.get("is_fair", True):
                self._is_fair = False
                return False
        self._is_fair = True
        return True

    def fit(self, oofs, y, sample_weight=None, sensitive_features=None):
        logger.debug("Ensemble.fit")
        self.sensitive_features = sensitive_features
        start_time = time.time()
        selected_algs_cnt = 0  # number of selected algorithms
        self.best_algs = []  # names of the algorithms selected in each loop

        total_prediction_time = 0
        best_sum = None  # sum of the predictions of the best algorithms
        for j in range(len(oofs)):  # iterate over all solutions
            min_score = self.metric.get_maximum()
            best_model = None
            # try to add some algorithm to the best_sum to minimize metric
            for model_name in oofs.keys():
                if (
                    self._max_single_prediction_time
                    and model_name in self.model_prediction_time
                ):
                    if (
                        total_prediction_time + self.model_prediction_time[model_name]
                        > self._max_single_prediction_time
                    ):
                        continue
                # skip unfair models
                if (
                    self._fairness_metric is not None
                    and not self.models_map[model_name].is_fair()
                ):
                    continue
                y_ens = self._get_mean(oofs[model_name], best_sum, j + 1)
                score = self.metric(y, y_ens, sample_weight)
                if self.metric.improvement(previous=min_score, current=score):
                    min_score = score
                    best_model = model_name

            if best_model is None:
                continue
            # there is improvement, save it
            # save scores for plotting learning curve
            # if we optimize negative, then we need to multiply by -1.0
            # to save correct values in the learning curve
            sign = -1.0 if Metric.optimize_negative(self.metric.name) else 1.0
            self._scores += [sign * min_score]

            if self.metric.improvement(previous=self.best_loss, current=min_score):
                self.best_loss = min_score
                selected_algs_cnt = j

            self.best_algs.append(best_model)  # save the best algorithm
            # update best_sum value
            best_sum = (
                oofs[best_model] if best_sum is None else best_sum + oofs[best_model]
            )
            if j == selected_algs_cnt:
                self.total_best_sum = copy.deepcopy(best_sum)

            # update prediction time estimate
            if self._max_single_prediction_time is not None:
                total_prediction_time = np.sum(
                    [
                        self.model_prediction_time[name]
                        for name in np.unique(self.best_algs)
                    ]
                )
        # end of main loop #

        if not self.best_algs:
            raise NotTrainedException("Ensemble wasn't fitted.")

        # keep oof predictions of ensemble
        self.total_best_sum /= float(selected_algs_cnt + 1)
        self.best_algs = self.best_algs[: (selected_algs_cnt + 1)]

        logger.debug("Selected models for ensemble:")
        for model_name in np.unique(self.best_algs):
            self.selected_models += [
                {
                    "model": self.models_map[model_name],
                    "repeat": float(self.best_algs.count(model_name)),
                }
            ]
            logger.debug(f"{model_name} {self.best_algs.count(model_name)}")

        self._additional_metrics = self.get_additional_metrics()

        self.train_time = time.time() - start_time

    def predict(self, X, X_stacked=None):
        logger.debug(
            "Ensemble.predict with {} models".format(len(self.selected_models))
        )
        y_predicted_ensemble = None
        total_repeat = 0.0

        for selected in self.selected_models:
            model = selected["model"]
            repeat = selected["repeat"]
            total_repeat += repeat

            if model._is_stacked:
                y_predicted_from_model = model.predict(X_stacked)
            else:
                y_predicted_from_model = model.predict(X)

            prediction_cols = []
            if self._ml_task in [BINARY_CLASSIFICATION, MULTICLASS_CLASSIFICATION]:
                prediction_cols = [
                    c for c in y_predicted_from_model.columns if "prediction_" in c
                ]
            else:  # REGRESSION
                prediction_cols = ["prediction"]
            y_predicted_from_model = y_predicted_from_model[prediction_cols]
            y_predicted_ensemble = (
                y_predicted_from_model * repeat
                if y_predicted_ensemble is None
                else y_predicted_ensemble + y_predicted_from_model * repeat
            )

        y_predicted_ensemble /= total_repeat

        if self._ml_task == MULTICLASS_CLASSIFICATION:
            cols = y_predicted_ensemble.columns.tolist()
            # prediction_
            labels = {i: v[11:] for i, v in enumerate(cols)}

            y_predicted_ensemble["label"] = np.argmax(
                np.array(y_predicted_ensemble[prediction_cols]), axis=1
            )
            y_predicted_ensemble["label"] = y_predicted_ensemble["label"].map(labels)

        return y_predicted_ensemble

    def to_json(self):
        models_json = []
        for selected in self.selected_models:
            model = selected["model"]
            repeat = selected["repeat"]
            models_json += [{"model": model.to_json(), "repeat": repeat}]

        json_desc = {
            "library_version": self.library_version,
            "algorithm_name": self.algorithm_name,
            "algorithm_short_name": self.algorithm_short_name,
            "uid": self.uid,
            "models": models_json,
        }
        return json_desc

    def from_json(self, json_desc):
        self.library_version = json_desc.get("library_version", self.library_version)
        self.algorithm_name = json_desc.get("algorithm_name", self.algorithm_name)
        self.algorithm_short_name = json_desc.get(
            "algorithm_short_name", self.algorithm_short_name
        )
        self.uid = json_desc.get("uid", self.uid)
        self.selected_models = []
        models_json = json_desc.get("models")
        for selected in models_json:
            model = selected["model"]
            repeat = selected["repeat"]

            il = ModelFramework(model.get("params"))
            il.from_json(model)
            self.selected_models += [
                # {"model": LearnerFactory.load(model), "repeat": repeat}
                {"model": il, "repeat": repeat}
            ]

    def save(self, results_path, model_subpath):
        model_path = os.path.join(results_path, model_subpath)
        logger.info(f"Save the ensemble to {model_path}")

        predictions = self.get_out_of_folds()
        predictions_fname = os.path.join(model_subpath, "predictions_ensemble.csv")
        self._oof_predictions_fname = os.path.join(results_path, predictions_fname)
        predictions.to_csv(self._oof_predictions_fname, index=False)

        with open(os.path.join(model_path, "ensemble.json"), "w") as fout:
            ms = []
            for selected in self.selected_models:
                ms += [{"model": selected["model"]._name, "repeat": selected["repeat"]}]

            desc = {
                "name": self._name,
                "ml_task": self._ml_task,
                "optimize_metric": self._optimize_metric,
                "selected_models": ms,
                "predictions_fname": predictions_fname,
                "metric_name": self.get_metric_name(),
                "final_loss": self.get_final_loss(),
                "train_time": self.get_train_time(),
                "is_stacked": self._is_stacked,
            }

            if self._threshold is not None:
                desc["threshold"] = self._threshold
            fout.write(json.dumps(desc, indent=4, cls=MLJSONEncoder))

        LearningCurves.plot_for_ensemble(self._scores, self.metric.name, model_path)

        # compute additional metrics just to be sure they are available
        self._additional_metrics = self.get_additional_metrics()

        AdditionalMetrics.save(
            self._additional_metrics, self._ml_task, self.model_markdown(), model_path
        )

        with open(os.path.join(model_path, "status.txt"), "w") as fout:
            fout.write("ALL OK!")

    def model_markdown(self):
        select_models_desc = []
        for selected in self.selected_models:
            select_models_desc += [
                {"model": selected["model"]._name, "repeat": selected["repeat"]}
            ]
        desc = f"# Summary of {self.get_name()}\n\n"
        desc += "[<< Go back](../README.md)\n\n"
        desc += "\n## Ensemble structure\n"
        selected = pd.DataFrame(select_models_desc)
        desc += tabulate(selected.values, ["Model", "Weight"], tablefmt="pipe")
        desc += "\n"
        return desc

    @staticmethod
    def load(results_path, model_subpath, models_map):
        model_path = os.path.join(results_path, model_subpath)
        logger.info(f"Loading ensemble from {model_path}")

        with open(os.path.join(model_path, "ensemble.json")) as file:
            json_desc = json.load(file)

        ensemble = Ensemble(json_desc.get("optimize_metric"), json_desc.get("ml_task"))
        ensemble._name = json_desc.get("name", ensemble._name)
        ensemble._threshold = json_desc.get("threshold", ensemble._threshold)
        for m in json_desc.get("selected_models", []):
            ensemble.selected_models += [
                {"model": models_map[m["model"]], "repeat": m["repeat"]}
            ]

        ensemble.best_loss = json_desc.get("final_loss", ensemble.best_loss)
        ensemble.train_time = json_desc.get("train_time", ensemble.train_time)
        ensemble._is_stacked = json_desc.get("is_stacked", ensemble._is_stacked)
        predictions_fname = json_desc.get("predictions_fname")
        if predictions_fname is not None:
            ensemble._oof_predictions_fname = os.path.join(
                results_path, predictions_fname
            )

        return ensemble

```
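
`Ensemble.fit` above is greedy ensemble selection in the style of Caruana et al.: in every round it adds, with replacement, the model whose inclusion in the running mean of out-of-fold predictions most improves the metric, then keeps the best-scoring prefix; repeat counts become the model weights used in `predict`. The following standalone sketch (hypothetical `greedy_selection` helper, plain NumPy, none of the fairness or prediction-time constraints) shows just that core loop:

```python
from collections import Counter

import numpy as np


def greedy_selection(oof, y, loss, rounds=None):
    """Toy version of the loop in Ensemble.fit: `oof` maps model name to an
    array of OOF predictions; returns per-model weights and the best loss."""
    rounds = rounds or len(oof)
    running_sum, picked = None, []
    best_loss, best_len = np.inf, 0
    for j in range(rounds):
        round_best, round_loss = None, np.inf
        for name, preds in oof.items():
            # candidate mean if `name` joined the j models already selected
            cand = preds if running_sum is None else (running_sum + preds) / (j + 1)
            score = loss(y, cand)
            if score < round_loss:
                round_loss, round_best = score, name
        picked.append(round_best)
        running_sum = oof[round_best] if running_sum is None else running_sum + oof[round_best]
        if round_loss < best_loss:  # keep the best-scoring prefix
            best_loss, best_len = round_loss, j + 1
    return Counter(picked[:best_len]), best_loss


rng = np.random.default_rng(0)
y = rng.normal(size=200)
oof = {f"m{i}": y + rng.normal(scale=0.3 + 0.2 * i, size=200) for i in range(5)}
weights, final_loss = greedy_selection(oof, y, lambda t, p: float(np.mean((t - p) ** 2)))
```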

--------------------------------------------------------------------------------
/supervised/fairness/metrics.py:
--------------------------------------------------------------------------------

```python
import numpy as np
import pandas as pd
from sklearn.metrics import (
    mean_absolute_error,
    mean_absolute_percentage_error,
    mean_squared_error,
    r2_score,
)

from supervised.fairness.optimization import FairnessOptimization
from supervised.fairness.plots import FairnessPlots
from supervised.fairness.utils import (
    accuracy,
    false_negative_rate,
    false_positive_rate,
    selection_rate,
    true_negative_rate,
    true_positive_rate,
)
from supervised.utils.metric import pearson, spearman


class FairnessMetrics:
    @staticmethod
    def binary_classification(
        target,
        predicted_labels,
        sensitive_features,
        fairness_metric,
        fairness_threshold,
        privileged_groups=[],
        underprivileged_groups=[],
        previous_fairness_optimization=None,
    ):
        target = np.array(target).ravel()
        preds = np.array(predicted_labels)

        fairness_metrics = {}

        for col in sensitive_features.columns:
            col_name = col[10:]  # skip the 'sensitive_' prefix (10 chars)

            accuracies = []
            selection_rates = []
            tprs = []
            fprs = []
            tnrs = []
            fnrs = []
            samples = []
            demographic_parity_diff = None
            demographic_parity_ratio = None
            equalized_odds_diff = None
            equalized_odds_ratio = None

            # overall
            accuracies += [accuracy(target, preds)]
            selection_rates += [selection_rate(preds)]
            tprs += [true_positive_rate(target, preds)]
            fprs += [false_positive_rate(target, preds)]
            tnrs += [true_negative_rate(target, preds)]
            fnrs += [false_negative_rate(target, preds)]
            samples += [target.shape[0]]

            values = sensitive_features[col].unique()

            for value in values:
                accuracies += [
                    accuracy(
                        target[sensitive_features[col] == value],
                        preds[sensitive_features[col] == value],
                    )
                ]
                selection_rates += [
                    selection_rate(preds[sensitive_features[col] == value])
                ]
                tprs += [
                    true_positive_rate(
                        target[sensitive_features[col] == value],
                        preds[sensitive_features[col] == value],
                    )
                ]
                fprs += [
                    false_positive_rate(
                        target[sensitive_features[col] == value],
                        preds[sensitive_features[col] == value],
                    )
                ]
                tnrs += [
                    true_negative_rate(
                        target[sensitive_features[col] == value],
                        preds[sensitive_features[col] == value],
                    )
                ]
                fnrs += [
                    false_negative_rate(
                        target[sensitive_features[col] == value],
                        preds[sensitive_features[col] == value],
                    )
                ]
                samples += [np.sum([sensitive_features[col] == value])]

            metrics = pd.DataFrame(
                {
                    "Samples": samples,
                    "Accuracy": accuracies,
                    "Selection Rate": selection_rates,
                    "True Positive Rate": tprs,
                    "False Negative Rate": fnrs,
                    "False Positive Rate": fprs,
                    "True Negative Rate": tnrs,
                },
                index=["Overall"] + list(values),
            )

            max_selection_rate = np.max(selection_rates[1:])
            min_selection_rate = np.min(selection_rates[1:])

            privileged_value, underprivileged_value = None, None
            for pg in privileged_groups:
                if col_name in pg:
                    privileged_value = pg.get(col_name)
            for upg in underprivileged_groups:
                if col_name in upg:
                    underprivileged_value = upg.get(col_name)

            if privileged_value is not None:
                for i, v in enumerate(values):
                    if v == privileged_value:
                        # starting from 1 because first selection rate is for all samples
                        max_selection_rate = selection_rates[i + 1]

            if underprivileged_value is not None:
                for i, v in enumerate(values):
                    if v == underprivileged_value:
                        # starting from 1 because first selection rate is for all samples
                        min_selection_rate = selection_rates[i + 1]

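            # Demographic parity compares group selection rates:
            # difference = max rate - min rate, ratio = min rate / max rate.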
            demographic_parity_diff = np.round(
                max_selection_rate - min_selection_rate, 4
            )
            demographic_parity_ratio = np.round(
                min_selection_rate / max_selection_rate, 4
            )

            tpr_min = np.min(tprs[1:])
            tpr_max = np.max(tprs[1:])

            fpr_min = np.min(fprs[1:])
            fpr_max = np.max(fprs[1:])

            if privileged_value is not None:
                for i, v in enumerate(values):
                    if v == privileged_value:
                        # starting from 1 because first value is for all samples
                        tpr_max = tprs[i + 1]
                        fpr_max = fprs[i + 1]

            if underprivileged_value is not None:
                for i, v in enumerate(values):
                    if v == underprivileged_value:
                        # starting from 1 because first value is for all samples
                        tpr_min = tprs[i + 1]
                        fpr_min = fprs[i + 1]

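            # Equalized odds compares both TPR and FPR across groups: the
            # difference is the larger of the two gaps; the ratio is the
            # smaller of the two group ratios.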
            equalized_odds_diff = np.round(max(tpr_max - tpr_min, fpr_max - fpr_min), 4)
            equalized_odds_ratio = np.round(
                min(tpr_min / tpr_max, fpr_min / fpr_max), 4
            )

            stats = pd.DataFrame(
                {
                    "": [
                        demographic_parity_diff,
                        demographic_parity_ratio,
                        equalized_odds_diff,
                        equalized_odds_ratio,
                    ]
                },
                index=[
                    "Demographic Parity Difference",
                    "Demographic Parity Ratio",
                    "Equalized Odds Difference",
                    "Equalized Odds Ratio",
                ],
            )

            fairness_metric_name = ""
            fairness_metric_value = 0
            is_fair = False
            if fairness_metric == "demographic_parity_difference":
                fairness_metric_name = "Demographic Parity Difference"
                fairness_metric_value = demographic_parity_diff
                is_fair = demographic_parity_diff < fairness_threshold
            elif fairness_metric == "demographic_parity_ratio":
                fairness_metric_name = "Demographic Parity Ratio"
                fairness_metric_value = demographic_parity_ratio
                is_fair = demographic_parity_ratio > fairness_threshold
            elif fairness_metric == "equalized_odds_difference":
                fairness_metric_name = "Equalized Odds Difference"
                fairness_metric_value = equalized_odds_diff
                is_fair = equalized_odds_diff < fairness_threshold
            elif fairness_metric == "equalized_odds_ratio":
                fairness_metric_name = "Equalized Odds Ratio"
                fairness_metric_value = equalized_odds_ratio
                is_fair = equalized_odds_ratio > fairness_threshold

            if "parity" in fairness_metric:
                if privileged_value is None:
                    ind = np.argmax(selection_rates[1:])
                    privileged_value = values[ind]
                if underprivileged_value is None:
                    ind = np.argmin(selection_rates[1:])
                    underprivileged_value = values[ind]

            if "odds" in fairness_metric:
                if tpr_max - tpr_min > fpr_max - fpr_min:
                    if privileged_value is None:
                        ind = np.argmax(tprs[1:])
                        privileged_value = values[ind]
                    if underprivileged_value is None:
                        ind = np.argmin(tprs[1:])
                        underprivileged_value = values[ind]
                else:
                    if privileged_value is None:
                        ind = np.argmax(fprs[1:])
                        privileged_value = values[ind]
                    if underprivileged_value is None:
                        ind = np.argmin(fprs[1:])
                        underprivileged_value = values[ind]

            fairness_metrics[col_name] = {
                "metrics": metrics,
                "stats": stats,
                "figures": FairnessPlots.binary_classification(
                    fairness_metric,
                    col_name,
                    metrics,
                    selection_rates,
                    max_selection_rate,
                    fairness_threshold,
                ),
                "fairness_metric_name": fairness_metric_name,
                "fairness_metric_value": fairness_metric_value,
                "is_fair": is_fair,
                "privileged_value": privileged_value,
                "underprivileged_value": underprivileged_value,
            }

        # fairness optimization stats
        fairness_metrics[
            "fairness_optimization"
        ] = FairnessOptimization.binary_classification(
            target,
            predicted_labels,
            sensitive_features,
            fairness_metric,
            fairness_threshold,
            privileged_groups,
            underprivileged_groups,
            previous_fairness_optimization,
            min_selection_rate,
            max_selection_rate,
        )

        return fairness_metrics

    @staticmethod
    def regression(
        target,
        predictions,
        sensitive_features,
        fairness_metric,
        fairness_threshold,
        privileged_groups=[],
        underprivileged_groups=[],
        previous_fairness_optimization=None,
    ):
        metric_name = fairness_metric.split("@")[1].upper()

        if "ratio" in fairness_metric.lower():
            fairness_metric_name = f"Group Loss Ratio @ {metric_name}"
        else:
            fairness_metric_name = f"Group Loss Difference @ {metric_name}"

        fairness_metrics = {}

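        # metric name -> callable taking (target, predictions);
        # used for the overall scores and for the per-group scores computed below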
        regression_metrics = {
            "SAMPLES": lambda t, p, sw=None: t.shape[0],
            "MAE": mean_absolute_error,
            "MSE": mean_squared_error,
            "RMSE": lambda t, p, sample_weight=None: np.sqrt(
                mean_squared_error(t, p, sample_weight=sample_weight)
            ),
            "R2": r2_score,
            "MAPE": mean_absolute_percentage_error,
            "SPEARMAN": spearman,
            "PEARSON": pearson,
        }
        overall = {}
        for k, v in regression_metrics.items():
            overall[k] = v(target, predictions)

        for col in sensitive_features.columns:
            col_name = col[10:]  # skip the 'sensitive_' prefix

            values = sensitive_features[col].unique()
            all_metrics = [overall]

            for value in values:
                metrics = {}
                for k, v in regression_metrics.items():
                    metrics[k] = v(
                        target[sensitive_features[col] == value],
                        predictions[sensitive_features[col] == value],
                    )
                all_metrics += [metrics]

            mdf = pd.DataFrame(all_metrics, index=["Overall"] + list(values))

            privileged_value, underprivileged_value = None, None
            for pg in privileged_groups:
                if col_name in pg:
                    privileged_value = pg.get(col_name)
            for upg in underprivileged_groups:
                if col_name in upg:
                    underprivileged_value = upg.get(col_name)

            if privileged_value is None:
                if metric_name in ["R2", "SPEARMAN", "PEARSON"]:
                    # the higher the better
                    privileged_value = mdf.index[
                        mdf[metric_name][1:].argmax() + 1
                    ]  # without overall metrics
                else:
                    # the lower the better
                    privileged_value = mdf.index[
                        mdf[metric_name][1:].argmin() + 1
                    ]  # without overall metrics

            if underprivileged_value is None:
                if metric_name in ["R2", "SPEARMAN", "PEARSON"]:
                    # the higher the better
                    underprivileged_value = mdf.index[
                        mdf[metric_name][1:].argmin() + 1
                    ]  # without overall metrics
                else:
                    # the lower the better
                    underprivileged_value = mdf.index[
                        mdf[metric_name][1:].argmax() + 1
                    ]  # without overall metrics

            metric_min = mdf[metric_name].loc[privileged_value]
            metric_max = mdf[metric_name].loc[underprivileged_value]

            ratio = np.round(metric_min / metric_max, 4)
            diff = np.round(metric_max - metric_min, 4)
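
            # illustrative worked example (editor sketch, values are assumptions):
            # for MAE of 10.0 in the privileged group and 12.5 in the
            # underprivileged group, ratio = 10.0 / 12.5 = 0.8 and
            # diff = 12.5 - 10.0 = 2.5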

            # ratio = np.round(mdf[metric_name][1:].min()/mdf[metric_name][1:].max(), 4)
            # diff = np.round(mdf[metric_name][1:].max()-mdf[metric_name][1:].min(), 4)

            is_fair = False
            if "ratio" in fairness_metric.lower():
                fairness_metric_value = ratio
                if ratio > fairness_threshold:
                    is_fair = True
            else:
                fairness_metric_value = diff
                if diff < fairness_threshold:
                    is_fair = True

            fairness_metrics[col_name] = {
                "metrics": mdf,
                "figures": FairnessPlots.regression(
                    fairness_metric, col_name, mdf, fairness_metric_name
                ),
                "privileged_value": privileged_value,
                "underprivileged_value": underprivileged_value,
                "ratio": ratio,
                "diff": diff,
                "metric_name": metric_name,
                "fairness_metric_name": fairness_metric_name,
                "fairness_metric_value": fairness_metric_value,
                "is_fair": is_fair,
                "fairness_threshold": fairness_threshold,
            }

        fairness_metrics["fairness_optimization"] = FairnessOptimization.regression(
            target,
            predictions,
            sensitive_features,
            fairness_metric,
            fairness_threshold,
            privileged_groups,
            underprivileged_groups,
            previous_fairness_optimization,
            performance_metric=regression_metrics[metric_name],
            performance_metric_name=metric_name,
        )

        return fairness_metrics

    @staticmethod
    def multiclass_classification(
        original_target,
        predicted_labels,
        sensitive_features,
        fairness_metric,
        fairness_threshold,
        privileged_groups=[],
        underprivileged_groups=[],
        previous_fairness_optimization=None,
    ):
        original_target = np.array(original_target).ravel()
        predicted_labels = np.array(predicted_labels)
        target_values = list(np.unique(original_target))

        fairness_metrics = {}

        for col in sensitive_features.columns:
            col_name = col[10:]  # skip the 'sensitive_' prefix

            for target_value in target_values:
                # we need to reset them for each target value
                privileged_value, underprivileged_value = None, None
                for pg in privileged_groups:
                    if col_name in pg:
                        privileged_value = pg.get(col_name)
                for upg in underprivileged_groups:
                    if col_name in upg:
                        underprivileged_value = upg.get(col_name)

                target = np.copy(original_target)
                target[original_target == target_value] = 1
                target[original_target != target_value] = 0

                preds = np.copy(predicted_labels)
                preds[predicted_labels == target_value] = 1
                preds[predicted_labels != target_value] = 0
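
                # (editor note) this one-vs-rest binarization recasts the
                # multiclass task as binary per class value, so the binary
                # fairness metrics computed below can be reused for each class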

                accuracies = []
                selection_rates = []
                tprs = []
                fprs = []
                tnrs = []
                fnrs = []
                samples = []
                demographic_parity_diff = None
                demographic_parity_ratio = None
                equalized_odds_diff = None
                equalized_odds_ratio = None

                # overall
                accuracies += [accuracy(target, preds)]
                selection_rates += [selection_rate(preds)]
                tprs += [true_positive_rate(target, preds)]
                fprs += [false_positive_rate(target, preds)]
                tnrs += [true_negative_rate(target, preds)]
                fnrs += [false_negative_rate(target, preds)]
                samples += [target.shape[0]]

                values = sensitive_features[col].unique()

                for value in values:
                    accuracies += [
                        accuracy(
                            target[sensitive_features[col] == value],
                            preds[sensitive_features[col] == value],
                        )
                    ]
                    selection_rates += [
                        selection_rate(preds[sensitive_features[col] == value])
                    ]
                    tprs += [
                        true_positive_rate(
                            target[sensitive_features[col] == value],
                            preds[sensitive_features[col] == value],
                        )
                    ]
                    fprs += [
                        false_positive_rate(
                            target[sensitive_features[col] == value],
                            preds[sensitive_features[col] == value],
                        )
                    ]
                    tnrs += [
                        true_negative_rate(
                            target[sensitive_features[col] == value],
                            preds[sensitive_features[col] == value],
                        )
                    ]
                    fnrs += [
                        false_negative_rate(
                            target[sensitive_features[col] == value],
                            preds[sensitive_features[col] == value],
                        )
                    ]
                    samples += [np.sum(sensitive_features[col] == value)]

                metrics = pd.DataFrame(
                    {
                        "Samples": samples,
                        "Accuracy": accuracies,
                        "Selection Rate": selection_rates,
                        "True Positive Rate": tprs,
                        "False Negative Rate": fnrs,
                        "False Positive Rate": fprs,
                        "True Negative Rate": tnrs,
                    },
                    index=["Overall"] + list(values),
                )

                max_selection_rate = np.max(selection_rates[1:])
                min_selection_rate = np.min(selection_rates[1:])

                if privileged_value is not None:
                    for i, v in enumerate(values):
                        if v == privileged_value:
                            # starting from 1 because first selection rate is for all samples
                            max_selection_rate = selection_rates[i + 1]

                if underprivileged_value is not None:
                    for i, v in enumerate(values):
                        if v == underprivileged_value:
                            # starting from 1 because first selection rate is for all samples
                            min_selection_rate = selection_rates[i + 1]

                demographic_parity_diff = np.round(
                    max_selection_rate - min_selection_rate, 4
                )
                demographic_parity_ratio = np.round(
                    min_selection_rate / max_selection_rate, 4
                )

                tpr_min = np.min(tprs[1:])
                tpr_max = np.max(tprs[1:])

                fpr_min = np.min(fprs[1:])
                fpr_max = np.max(fprs[1:])

                if privileged_value is not None:
                    for i, v in enumerate(values):
                        if v == privileged_value:
                            # starting from 1 because first value is for all samples
                            tpr_max = tprs[i + 1]
                            fpr_max = fprs[i + 1]

                if underprivileged_value is not None:
                    for i, v in enumerate(values):
                        if v == underprivileged_value:
                            # starting from 1 because first value is for all samples
                            tpr_min = tprs[i + 1]
                            fpr_min = fprs[i + 1]

                equalized_odds_diff = np.round(
                    max(tpr_max - tpr_min, fpr_max - fpr_min), 4
                )
                equalized_odds_ratio = np.round(
                    min(tpr_min / tpr_max, fpr_min / fpr_max), 4
                )

                stats = pd.DataFrame(
                    {
                        "": [
                            demographic_parity_diff,
                            demographic_parity_ratio,
                            equalized_odds_diff,
                            equalized_odds_ratio,
                        ]
                    },
                    index=[
                        "Demographic Parity Difference",
                        "Demographic Parity Ratio",
                        "Equalized Odds Difference",
                        "Equalized Odds Ratio",
                    ],
                )

                fairness_metric_name = ""
                fairness_metric_value = 0
                is_fair = False
                if fairness_metric == "demographic_parity_difference":
                    fairness_metric_name = "Demographic Parity Difference"
                    fairness_metric_value = demographic_parity_diff
                    is_fair = demographic_parity_diff < fairness_threshold
                elif fairness_metric == "demographic_parity_ratio":
                    fairness_metric_name = "Demographic Parity Ratio"
                    fairness_metric_value = demographic_parity_ratio
                    is_fair = demographic_parity_ratio > fairness_threshold
                elif fairness_metric == "equalized_odds_difference":
                    fairness_metric_name = "Equalized Odds Difference"
                    fairness_metric_value = equalized_odds_diff
                    is_fair = equalized_odds_diff < fairness_threshold
                elif fairness_metric == "equalized_odds_ratio":
                    fairness_metric_name = "Equalized Odds Ratio"
                    fairness_metric_value = equalized_odds_ratio
                    is_fair = equalized_odds_ratio > fairness_threshold

                if "parity" in fairness_metric:
                    if privileged_value is None:
                        ind = np.argmax(selection_rates[1:])
                        privileged_value = values[ind]
                    if underprivileged_value is None:
                        ind = np.argmin(selection_rates[1:])
                        underprivileged_value = values[ind]

                if "odds" in fairness_metric:
                    if tpr_max - tpr_min > fpr_max - fpr_min:
                        if privileged_value is None:
                            ind = np.argmax(tprs[1:])
                            privileged_value = values[ind]
                        if underprivileged_value is None:
                            ind = np.argmin(tprs[1:])
                            underprivileged_value = values[ind]
                    else:
                        if privileged_value is None:
                            ind = np.argmax(fprs[1:])
                            privileged_value = values[ind]
                        if underprivileged_value is None:
                            ind = np.argmin(fprs[1:])
                            underprivileged_value = values[ind]

                fairness_metrics[f"{col_name}__{target_value}"] = {
                    "metrics": metrics,
                    "stats": stats,
                    "figures": FairnessPlots.binary_classification(
                        fairness_metric,
                        f"{col_name}__{target_value}",
                        metrics,
                        selection_rates,
                        max_selection_rate,
                        fairness_threshold,
                    ),
                    "fairness_metric_name": fairness_metric_name,
                    "fairness_metric_value": fairness_metric_value,
                    "is_fair": is_fair,
                    "privileged_value": privileged_value,
                    "underprivileged_value": underprivileged_value,
                }

        # fairness optimization stats
        fairness_metrics[
            "fairness_optimization"
        ] = FairnessOptimization.multiclass_classification(
            original_target,
            predicted_labels,
            sensitive_features,
            fairness_metric,
            fairness_threshold,
            privileged_groups,
            underprivileged_groups,
            previous_fairness_optimization,
        )

        return fairness_metrics

```

--------------------------------------------------------------------------------
/supervised/preprocessing/preprocessing.py:
--------------------------------------------------------------------------------

```python
import logging

import numpy as np
import pandas as pd

from supervised.algorithms.registry import (
    BINARY_CLASSIFICATION,
    MULTICLASS_CLASSIFICATION,
)
from supervised.exceptions import AutoMLException
from supervised.preprocessing.datetime_transformer import DateTimeTransformer
from supervised.preprocessing.exclude_missing_target import ExcludeRowsMissingTarget
from supervised.preprocessing.goldenfeatures_transformer import (
    GoldenFeaturesTransformer,
)
from supervised.preprocessing.kmeans_transformer import KMeansTransformer
from supervised.preprocessing.label_binarizer import LabelBinarizer
from supervised.preprocessing.label_encoder import LabelEncoder
from supervised.preprocessing.preprocessing_categorical import PreprocessingCategorical
from supervised.preprocessing.preprocessing_missing import PreprocessingMissingValues
from supervised.preprocessing.scale import Scale
from supervised.preprocessing.text_transformer import TextTransformer
from supervised.utils.config import LOG_LEVEL

logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)


class Preprocessing(object):
    def __init__(
        self,
        preprocessing_params={"target_preprocessing": [], "columns_preprocessing": {}},
        model_name=None,
        k_fold=None,
        repeat=None,
    ):
        self._params = preprocessing_params

        if "target_preprocessing" not in preprocessing_params:
            self._params["target_preprocessing"] = []
        if "columns_preprocessing" not in preprocessing_params:
            self._params["columns_preprocessing"] = {}

        # preprocessing step attributes
        self._categorical_y = None
        self._scale_y = None
        self._missing_values = []
        self._categorical = []
        self._scale = []
        self._remove_columns = []
        self._datetime_transforms = []
        self._text_transforms = []
        self._golden_features = None
        self._kmeans = None
        self._add_random_feature = self._params.get("add_random_feature", False)
        self._drop_features = self._params.get("drop_features", [])
        self._model_name = model_name
        self._k_fold = k_fold
        self._repeat = repeat

    def _exclude_missing_targets(self, X=None, y=None):
        # check if there are missing values in target column
        if y is None:
            return X, y
        y_missing = pd.isnull(y)
        if np.sum(np.array(y_missing)) == 0:
            return X, y
        y = y.drop(y.index[y_missing])
        y.index = range(y.shape[0])
        if X is not None:
            X = X.drop(X.index[y_missing])
            X.index = range(X.shape[0])
        return X, y

    # fit and transform
    def fit_and_transform(self, X_train, y_train, sample_weight=None):
        logger.debug("Preprocessing.fit_and_transform")

        if y_train is not None:
            # target preprocessing
            # this must run first, because rows with missing target values may be dropped
            target_preprocessing = self._params.get("target_preprocessing")
            logger.debug("target_preprocessing params: {}".format(target_preprocessing))

            X_train, y_train, sample_weight, _ = ExcludeRowsMissingTarget.transform(
                X_train, y_train, sample_weight
            )

            if PreprocessingCategorical.CONVERT_INTEGER in target_preprocessing:
                logger.debug("Convert target to integer")
                self._categorical_y = LabelEncoder(try_to_fit_numeric=True)
                self._categorical_y.fit(y_train)
                y_train = pd.Series(self._categorical_y.transform(y_train))

            if PreprocessingCategorical.CONVERT_ONE_HOT in target_preprocessing:
                logger.debug("Convert target to one-hot coding")
                self._categorical_y = LabelBinarizer()
                self._categorical_y.fit(pd.DataFrame({"target": y_train}), "target")
                y_train = self._categorical_y.transform(
                    pd.DataFrame({"target": y_train}), "target"
                )

            if Scale.SCALE_LOG_AND_NORMAL in target_preprocessing:
                logger.debug("Scale log and normal")

                self._scale_y = Scale(
                    ["target"], scale_method=Scale.SCALE_LOG_AND_NORMAL
                )
                y_train = pd.DataFrame({"target": y_train})
                self._scale_y.fit(y_train)
                y_train = self._scale_y.transform(y_train)
                y_train = y_train["target"]

            if Scale.SCALE_NORMAL in target_preprocessing:
                logger.debug("Scale normal")

                self._scale_y = Scale(["target"], scale_method=Scale.SCALE_NORMAL)
                y_train = pd.DataFrame({"target": y_train})
                self._scale_y.fit(y_train)
                y_train = self._scale_y.transform(y_train)
                y_train = y_train["target"]
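
            # note (editor): inverse_scale_target() defined later in this class
            # undoes this target scaling when producing final predictions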

        # columns preprocessing
        columns_preprocessing = self._params.get("columns_preprocessing")
        for column in columns_preprocessing:
            transforms = columns_preprocessing[column]
            # logger.debug("Preprocess column {} with: {}".format(column, transforms))

        # remove empty or constant columns
        cols_to_remove = list(
            filter(
                lambda k: "remove_column" in columns_preprocessing[k],
                columns_preprocessing,
            )
        )

        if X_train is not None:
            X_train.drop(cols_to_remove, axis=1, inplace=True)
        self._remove_columns = cols_to_remove

        numeric_cols = []  # get numeric cols before text transformations
        # needed for golden features
        if X_train is not None and (
            "golden_features" in self._params or "kmeans_features" in self._params
        ):
            numeric_cols = X_train.select_dtypes(include="number").columns.tolist()

        # there can be missing values in the text data,
        # but we don't want to handle them with the fill-missing methods;
        # missing text values will be imputed within the text transform step
        cols_to_process = list(
            filter(
                lambda k: "text_transform" in columns_preprocessing[k],
                columns_preprocessing,
            )
        )

        new_text_columns = []
        for col in cols_to_process:
            t = TextTransformer()
            t.fit(X_train, col)
            X_train = t.transform(X_train)
            self._text_transforms += [t]
            new_text_columns += t._new_columns
        # end of text transform

        for missing_method in [PreprocessingMissingValues.FILL_NA_MEDIAN]:
            cols_to_process = list(
                filter(
                    lambda k: missing_method in columns_preprocessing[k],
                    columns_preprocessing,
                )
            )
            missing = PreprocessingMissingValues(cols_to_process, missing_method)
            missing.fit(X_train)
            X_train = missing.transform(X_train)
            self._missing_values += [missing]

        # golden features
        golden_columns = []
        if "golden_features" in self._params:
            results_path = self._params["golden_features"]["results_path"]
            ml_task = self._params["golden_features"]["ml_task"]
            features_count = self._params["golden_features"].get("features_count")
            n_jobs = self._params["golden_features"].get("n_jobs", -1)
            self._golden_features = GoldenFeaturesTransformer(
                results_path, ml_task, features_count, n_jobs
            )
            self._golden_features.fit(X_train[numeric_cols], y_train)
            X_train = self._golden_features.transform(X_train)
            golden_columns = self._golden_features._new_columns

        kmeans_columns = []
        if "kmeans_features" in self._params:
            results_path = self._params["kmeans_features"]["results_path"]
            self._kmeans = KMeansTransformer(
                results_path, self._model_name, self._k_fold
            )
            self._kmeans.fit(X_train[numeric_cols], y_train)
            X_train = self._kmeans.transform(X_train)
            kmeans_columns = self._kmeans._new_features

        for convert_method in [
            PreprocessingCategorical.CONVERT_INTEGER,
            PreprocessingCategorical.CONVERT_ONE_HOT
        ]:
            cols_to_process = list(
                filter(
                    lambda k: convert_method in columns_preprocessing[k],
                    columns_preprocessing,
                )
            )
            convert = PreprocessingCategorical(cols_to_process, convert_method)
            convert.fit(X_train, y_train)
            X_train = convert.transform(X_train)
            self._categorical += [convert]

        # datetime transform
        cols_to_process = list(
            filter(
                lambda k: "datetime_transform" in columns_preprocessing[k],
                columns_preprocessing,
            )
        )

        new_datetime_columns = []
        for col in cols_to_process:
            t = DateTimeTransformer()
            t.fit(X_train, col)
            X_train = t.transform(X_train)
            self._datetime_transforms += [t]
            new_datetime_columns += t._new_columns

        # SCALE
        for scale_method in [Scale.SCALE_NORMAL, Scale.SCALE_LOG_AND_NORMAL]:
            cols_to_process = list(
                filter(
                    lambda k: scale_method in columns_preprocessing[k],
                    columns_preprocessing,
                )
            )
            if (
                len(cols_to_process)
                and len(new_datetime_columns)
                and scale_method == Scale.SCALE_NORMAL
            ):
                cols_to_process += new_datetime_columns
            if (
                len(cols_to_process)
                and len(new_text_columns)
                and scale_method == Scale.SCALE_NORMAL
            ):
                cols_to_process += new_text_columns

            if (
                len(cols_to_process)
                and len(golden_columns)
                and scale_method == Scale.SCALE_NORMAL
            ):
                cols_to_process += golden_columns

            if (
                len(cols_to_process)
                and len(kmeans_columns)
                and scale_method == Scale.SCALE_NORMAL
            ):
                cols_to_process += kmeans_columns

            if len(cols_to_process):
                scale = Scale(cols_to_process)
                scale.fit(X_train)
                X_train = scale.transform(X_train)
                self._scale += [scale]

        if self._add_random_feature:
            # uniform values in [-1, 1), with mean 0
            X_train["random_feature"] = np.random.rand(X_train.shape[0]) * 2.0 - 1.0

        if self._drop_features:
            available_cols = X_train.columns.tolist()
            drop_cols = [c for c in self._drop_features if c in available_cols]
            if len(drop_cols) == X_train.shape[1]:
                raise AutoMLException(
                    "All features are droppped! Your data looks like random data."
                )
            if drop_cols:
                X_train.drop(drop_cols, axis=1, inplace=True)
            self._drop_features = drop_cols

        if X_train is not None:
            # there can be categorical columns (e.g., for CatBoost) which can't be clipped;
            # clip only numeric columns so values fit safely within float32 range
            numeric_cols = X_train.select_dtypes(include="number").columns.tolist()
            X_train[numeric_cols] = X_train[numeric_cols].clip(
                lower=np.finfo(np.float32).min + 1000,
                upper=np.finfo(np.float32).max - 1000,
            )

        return X_train, y_train, sample_weight
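
    # minimal usage sketch (editor note, illustrative; variable names are assumptions):
    #   prep = Preprocessing(preprocessing_params=params, model_name="model_1")
    #   X_train, y_train, sw = prep.fit_and_transform(X_train, y_train)
    #   X_val, y_val, sw_val = prep.transform(X_validation, y_validation)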

    def transform(self, X_validation, y_validation, sample_weight_validation=None):
        logger.debug("Preprocessing.transform")

        # doing copy to avoid SettingWithCopyWarning
        if X_validation is not None:
            X_validation = X_validation.copy(deep=False)
        if y_validation is not None:
            y_validation = y_validation.copy(deep=False)

        # target preprocessing
        # this must run first, because rows with missing target values may be dropped
        if y_validation is not None:
            target_preprocessing = self._params.get("target_preprocessing")
            logger.debug("target_preprocessing -> {}".format(target_preprocessing))

            (
                X_validation,
                y_validation,
                sample_weight_validation,
                _,
            ) = ExcludeRowsMissingTarget.transform(
                X_validation, y_validation, sample_weight_validation
            )

            if PreprocessingCategorical.CONVERT_INTEGER in target_preprocessing:
                if y_validation is not None and self._categorical_y is not None:
                    y_validation = pd.Series(
                        self._categorical_y.transform(y_validation)
                    )

            if PreprocessingCategorical.CONVERT_ONE_HOT in target_preprocessing:
                if y_validation is not None and self._categorical_y is not None:
                    y_validation = self._categorical_y.transform(
                        pd.DataFrame({"target": y_validation}), "target"
                    )

            if Scale.SCALE_LOG_AND_NORMAL in target_preprocessing:
                if self._scale_y is not None and y_validation is not None:
                    logger.debug("Transform log and normalize")
                    y_validation = pd.DataFrame({"target": y_validation})
                    y_validation = self._scale_y.transform(y_validation)
                    y_validation = y_validation["target"]

            if Scale.SCALE_NORMAL in target_preprocessing:
                if self._scale_y is not None and y_validation is not None:
                    logger.debug("Transform normalize")
                    y_validation = pd.DataFrame({"target": y_validation})
                    y_validation = self._scale_y.transform(y_validation)
                    y_validation = y_validation["target"]

        # columns preprocessing
        if len(self._remove_columns) and X_validation is not None:
            cols_to_remove = [
                col for col in X_validation.columns if col in self._remove_columns
            ]
            X_validation.drop(cols_to_remove, axis=1, inplace=True)

        # text transform
        for tt in self._text_transforms:
            if X_validation is not None and tt is not None:
                X_validation = tt.transform(X_validation)

        for missing in self._missing_values:
            if X_validation is not None and missing is not None:
                X_validation = missing.transform(X_validation)

        # make sure that all missing values are filled;
        # new data can have gaps in columns that were complete during training!
        if (
            X_validation is not None
            and pd.isnull(X_validation).sum().sum() > 0
            and len(self._params["columns_preprocessing"]) > 0
        ):
            # there is something missing, fill it
            # we should notify the user about it!
            # warnings should go to a separate file ...
            # warnings.warn(
            #    "There are columns {} with missing values which didnt have missing values in train dataset.".format(
            #        list(
            #            X_validation.columns[np.where(np.sum(pd.isnull(X_validation)))]
            #        )
            #    )
            # )
            missing = PreprocessingMissingValues(
                X_validation.columns, PreprocessingMissingValues.FILL_NA_MEDIAN
            )
            missing.fit(X_validation)
            X_validation = missing.transform(X_validation)

        # golden features
        if self._golden_features is not None:
            X_validation = self._golden_features.transform(X_validation)

        if self._kmeans is not None:
            X_validation = self._kmeans.transform(X_validation)

        for convert in self._categorical:
            if X_validation is not None and convert is not None:
                X_validation = convert.transform(X_validation)

        for dtt in self._datetime_transforms:
            if X_validation is not None and dtt is not None:
                X_validation = dtt.transform(X_validation)

        for scale in self._scale:
            if X_validation is not None and scale is not None:
                X_validation = scale.transform(X_validation)

        if self._add_random_feature:
            # uniform values in [-1, 1), with mean 0
            X_validation["random_feature"] = (
                np.random.rand(X_validation.shape[0]) * 2.0 - 1.0
            )

        if self._drop_features and X_validation is not None:
            X_validation.drop(self._drop_features, axis=1, inplace=True)

        if X_validation is not None:
            # there can be categorical columns (e.g., for CatBoost) which can't be clipped;
            # clip only numeric columns so values fit safely within float32 range
            numeric_cols = X_validation.select_dtypes(include="number").columns.tolist()
            X_validation[numeric_cols] = X_validation[numeric_cols].clip(
                lower=np.finfo(np.float32).min + 1000,
                upper=np.finfo(np.float32).max - 1000,
            )

        return X_validation, y_validation, sample_weight_validation

    def inverse_scale_target(self, y):
        if self._scale_y is not None:
            y = pd.DataFrame({"target": y})
            y = self._scale_y.inverse_transform(y)
            y = y["target"]
        return y

    def inverse_categorical_target(self, y):
        if self._categorical_y is not None:
            y = self._categorical_y.inverse_transform(y)
            y = y.astype(str)
        return y

    def get_target_class_names(self):
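        # recover the original class names from the fitted target encoder;
        # e.g. (illustrative) a LabelEncoder fitted on ["no", "yes"] yields
        # ["no", "yes"], with "yes" as the positive label (encoded as 1)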
        pos_label, neg_label = "1", "0"
        if self._categorical_y is not None:
            if self._params["ml_task"] == BINARY_CLASSIFICATION:
                # binary classification
                for label, value in self._categorical_y.to_json().items():
                    if value == 1:
                        pos_label = label
                    else:
                        neg_label = label
                return [neg_label, pos_label]
            else:
                # multiclass classification
                # logger.debug(self._categorical_y.to_json())
                if "unique_values" not in self._categorical_y.to_json():
                    labels = dict(
                        (v, k) for k, v in self._categorical_y.to_json().items()
                    )
                else:
                    labels = {
                        i: v
                        for i, v in enumerate(
                            self._categorical_y.to_json()["unique_values"]
                        )
                    }

                return list(labels.values())

        else:  # self._categorical_y is None
            if "ml_task" in self._params:
                if self._params["ml_task"] == BINARY_CLASSIFICATION:
                    return ["0", "1"]
        return []

    def prepare_target_labels(self, y):
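        # wrap raw prediction scores in a DataFrame with one
        # "prediction_<label>" column per class; for 2D multiclass output,
        # a "label" column with the argmax class name is added as well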
        pos_label, neg_label = "1", "0"

        if self._categorical_y is not None:
            if len(y.shape) == 1:
                # binary classification
                for label, value in self._categorical_y.to_json().items():
                    if value == 1:
                        pos_label = label
                    else:
                        neg_label = label
                # threshold is applied in AutoML class
                return pd.DataFrame(
                    {
                        "prediction_{}".format(neg_label): 1 - y,
                        "prediction_{}".format(pos_label): y,
                    }
                )
            else:
                # multiclass classification
                if "unique_values" not in self._categorical_y.to_json():
                    labels = dict(
                        (v, k) for k, v in self._categorical_y.to_json().items()
                    )
                else:
                    labels = {
                        i: v
                        for i, v in enumerate(
                            self._categorical_y.to_json()["unique_values"]
                        )
                    }

                d = {}
                cols = []
                for i in range(y.shape[1]):
                    d["prediction_{}".format(labels[i])] = y[:, i]
                    cols += ["prediction_{}".format(labels[i])]
                df = pd.DataFrame(d)
                df["label"] = np.argmax(np.array(df[cols]), axis=1)

                df["label"] = df["label"].map(labels)

                return df
        else:  # self._categorical_y is None
            if "ml_task" in self._params:
                if self._params["ml_task"] == BINARY_CLASSIFICATION:
                    return pd.DataFrame({"prediction_0": 1 - y, "prediction_1": y})
                elif self._params["ml_task"] == MULTICLASS_CLASSIFICATION:
                    return pd.DataFrame(
                        data=y,
                        columns=["prediction_{}".format(i) for i in range(y.shape[1])],
                    )

        return pd.DataFrame({"prediction": y})

    def to_json(self):
        preprocessing_params = {}
        if self._remove_columns:
            preprocessing_params["remove_columns"] = self._remove_columns
        if self._missing_values is not None and len(self._missing_values):
            mvs = []  # refactor
            for mv in self._missing_values:
                if mv.to_json():
                    mvs += [mv.to_json()]
            if mvs:
                preprocessing_params["missing_values"] = mvs
        if self._categorical is not None and len(self._categorical):
            cats = []  # refactor
            for cat in self._categorical:
                if cat.to_json():
                    cats += [cat.to_json()]
            if cats:
                preprocessing_params["categorical"] = cats

        if self._datetime_transforms is not None and len(self._datetime_transforms):
            dtts = []
            for dtt in self._datetime_transforms:
                dtts += [dtt.to_json()]
            if dtts:
                preprocessing_params["datetime_transforms"] = dtts

        if self._text_transforms is not None and len(self._text_transforms):
            tts = []
            for tt in self._text_transforms:
                tts += [tt.to_json()]
            if tts:
                preprocessing_params["text_transforms"] = tts

        if self._golden_features is not None:
            preprocessing_params["golden_features"] = self._golden_features.to_json()

        if self._kmeans is not None:
            preprocessing_params["kmeans"] = self._kmeans.to_json()

        if self._scale is not None and len(self._scale):
            scs = [sc.to_json() for sc in self._scale if sc.to_json()]
            if scs:
                preprocessing_params["scale"] = scs
        if self._categorical_y is not None:
            cat_y = self._categorical_y.to_json()
            if cat_y:
                preprocessing_params["categorical_y"] = cat_y
        if self._scale_y is not None:
            preprocessing_params["scale_y"] = self._scale_y.to_json()

        if "ml_task" in self._params:
            preprocessing_params["ml_task"] = self._params["ml_task"]

        if self._add_random_feature:
            preprocessing_params["add_random_feature"] = True

        if self._drop_features:
            preprocessing_params["drop_features"] = self._drop_features

        preprocessing_params["params"] = self._params

        return preprocessing_params

    def from_json(self, data_json, results_path):
        self._params = data_json.get("params", self._params)

        if "remove_columns" in data_json:
            self._remove_columns = data_json.get("remove_columns", [])
        if "missing_values" in data_json:
            self._missing_values = []
            for mv_data in data_json["missing_values"]:
                mv = PreprocessingMissingValues()
                mv.from_json(mv_data)
                self._missing_values += [mv]
        if "categorical" in data_json:
            self._categorical = []
            for cat_data in data_json["categorical"]:
                cat = PreprocessingCategorical()
                cat.from_json(cat_data)
                self._categorical += [cat]

        if "datetime_transforms" in data_json:
            self._datetime_transforms = []
            for dtt_params in data_json["datetime_transforms"]:
                dtt = DateTimeTransformer()
                dtt.from_json(dtt_params)
                self._datetime_transforms += [dtt]

        if "text_transforms" in data_json:
            self._text_transforms = []
            for tt_params in data_json["text_transforms"]:
                tt = TextTransformer()
                tt.from_json(tt_params)
                self._text_transforms += [tt]

        if "golden_features" in data_json:
            self._golden_features = GoldenFeaturesTransformer()
            self._golden_features.from_json(data_json["golden_features"], results_path)

        if "kmeans" in data_json:
            self._kmeans = KMeansTransformer()
            self._kmeans.from_json(data_json["kmeans"], results_path)

        if "scale" in data_json:
            self._scale = []
            for scale_data in data_json["scale"]:
                sc = Scale()
                sc.from_json(scale_data)
                self._scale += [sc]
        if "categorical_y" in data_json:
            if "new_columns" in data_json["categorical_y"]:
                self._categorical_y = LabelBinarizer()
            else:
                self._categorical_y = LabelEncoder()

            self._categorical_y.from_json(data_json["categorical_y"])
        if "scale_y" in data_json:
            self._scale_y = Scale()
            self._scale_y.from_json(data_json["scale_y"])
        if "ml_task" in data_json:
            self._params["ml_task"] = data_json["ml_task"]

        self._add_random_feature = data_json.get("add_random_feature", False)
        self._drop_features = data_json.get("drop_features", [])
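
    # persistence round-trip sketch (editor note, illustrative):
    #   state = prep.to_json()
    #   restored = Preprocessing()
    #   restored.from_json(state, results_path)  # rebuilds fitted transformers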

```

--------------------------------------------------------------------------------
/supervised/automl.py:
--------------------------------------------------------------------------------

```python
import logging

import matplotlib

import warnings

warnings.filterwarnings("ignore", message=".*The 'nopython' keyword.*")

from collections.abc import Iterable

# libraries for type hints
from typing import List, Optional, Union

import numpy
import pandas
from typing_extensions import (
    Literal,
)  # typing_extensions provides Literal support for Python 3.7

from supervised.base_automl import BaseAutoML
from supervised.utils.config import LOG_LEVEL

logging.basicConfig(
    format="%(asctime)s %(name)s %(levelname)s %(message)s", level=logging.ERROR
)
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)


class AutoML(BaseAutoML):

    """
    Automated Machine Learning for supervised tasks (binary classification, multiclass classification, regression).
    """

    def __init__(
        self,
        results_path: Optional[str] = None,
        total_time_limit: int = 60 * 60,
        mode: Literal["Explain", "Perform", "Compete", "Optuna"] = "Explain",
        ml_task: Literal[
            "auto", "binary_classification", "multiclass_classification", "regression"
        ] = "auto",
        model_time_limit: Optional[int] = None,
        algorithms: Union[
            Literal["auto"],
            List[
                Literal[
                    "Baseline",
                    "Linear",
                    "Decision Tree",
                    "Random Forest",
                    "Extra Trees",
                    "LightGBM",
                    "Xgboost",
                    "CatBoost",
                    "Neural Network",
                    "Nearest Neighbors",
                ]
            ],
        ] = "auto",
        train_ensemble: bool = True,
        stack_models: Union[Literal["auto"], bool] = "auto",
        eval_metric: str = "auto",
        validation_strategy: Union[Literal["auto"], dict] = "auto",
        explain_level: Union[Literal["auto"], Literal[0, 1, 2]] = "auto",
        golden_features: Union[Literal["auto"], bool, int] = "auto",
        features_selection: Union[Literal["auto"], bool] = "auto",
        start_random_models: Union[Literal["auto"], int] = "auto",
        hill_climbing_steps: Union[Literal["auto"], int] = "auto",
        top_models_to_improve: Union[Literal["auto"], int] = "auto",
        boost_on_errors: Union[Literal["auto"], bool] = "auto",
        kmeans_features: Union[Literal["auto"], bool] = "auto",
        mix_encoding: Union[Literal["auto"], bool] = "auto",
        max_single_prediction_time: Optional[Union[int, float]] = None,
        optuna_time_budget: Optional[int] = None,
        optuna_init_params: dict = {},
        optuna_verbose: bool = True,
        fairness_metric: str = "auto",
        fairness_threshold: Union[Literal["auto"], float] = "auto",
        privileged_groups: Union[Literal["auto"], list] = "auto",
        underprivileged_groups: Union[Literal["auto"], list] = "auto",
        n_jobs: int = -1,
        verbose: int = 1,
        random_state: int = 1234,
    ):
        """
        Initialize `AutoML` object.

        Arguments:
            results_path (str): The path with results. If None, then the name of the directory will be generated using the template: AutoML_{number},
                where the number can be from 1 to 1,000 - depending on which directory name is available.
                If `results_path` points to a directory with AutoML results (`params.json` must be present),
                then all models will be loaded.

            total_time_limit (int): The total time limit in seconds for AutoML training.
                It is ignored when `model_time_limit` is set.

            mode (str): Can be {`Explain`, `Perform`, `Compete`, `Optuna`}. This parameter defines the goal of AutoML and how intensive the AutoML search will be.

                - `Explain` : To be used when the user wants to explain and understand the data.
                    - Uses 75%/25% train/test split.
                    - Uses the following models: `Baseline`, `Linear`, `Decision Tree`, `Random Forest`, `XGBoost`, `Neural Network`, and `Ensemble`.
                    - Has full explanations in reports: learning curves, importance plots, and SHAP plots.
                - `Perform` : To be used when the user wants to train a model that will be used in real-life use cases.
                    - Uses 5-fold CV (Cross-Validation).
                    - Uses the following models: `Linear`, `Random Forest`, `LightGBM`, `XGBoost`, `CatBoost`, `Neural Network`, and `Ensemble`.
                    - Has learning curves and importance plots in reports.
                - `Compete` : To be used for machine learning competitions (maximum performance).
                    - Uses 80/20 train/test split, or 5-fold CV, or 10-fold CV (Cross-Validation) - it depends on `total_time_limit`. If not set directly, AutoML will select validation automatically.
                    - Uses the following models: `Decision Tree`, `Random Forest`, `Extra Trees`, `LightGBM`,  `XGBoost`, `CatBoost`, `Neural Network`,
                        `Nearest Neighbors`, `Ensemble`, and `Stacking`.
                    - It has only learning curves in the reports.
                - `Optuna` : To be used for creating highly-tuned machine learning models.
                    - Uses 10-fold CV (Cross-Validation).
                    - It tunes with Optuna the following algorithms: `Random Forest`, `Extra Trees`, `LightGBM`, `XGBoost`, `CatBoost`, `Neural Network`.
                    - It applies `Ensemble` and `Stacking` for trained models.
                    - It has only learning curves in the reports.

            ml_task (str): Can be {"auto", "binary_classification", "multiclass_classification", "regression"}.

                - If left `auto`, AutoML will try to guess the task based on the target values.
                - If there are only 2 values in the target, the task is set to `"binary_classification"`.
                - If the number of unique values in the target is between 2 and 20 (inclusive), the task is set to `"multiclass_classification"`.
                - In all other cases, the task is set to `"regression"`.

            model_time_limit (int): The time limit for training a single model, in seconds.
                If `model_time_limit` is set, the `total_time_limit` is not respected.
                The single model can contain several learners. The time limit for subsequent learners is computed based on `model_time_limit`.

                For example, in the case of 10-fold cross-validation, one model will have 10 learners.
                The `model_time_limit` is the time for all 10 learners.

            algorithms (list of str): The list of algorithms that will be used in the training.
                The algorithms can be:

                - `Baseline`,
                - `Linear`,
                - `Decision Tree`,
                - `Random Forest`,
                - `Extra Trees`,
                - `LightGBM`,
                - `Xgboost`,
                - `CatBoost`,
                - `Neural Network`,
                - `Nearest Neighbors`,


            train_ensemble (boolean): Whether an ensemble gets created at the end of the training.

            stack_models (boolean): Whether a stack of models gets created at the end of the training. Stack level is 1.

            eval_metric (str): The metric to be used in early stopping and to compare models.

                - for binary classification: `logloss`, `auc`, `f1`, `average_precision`, `accuracy` - default is `logloss` (if left "auto")
                - for multiclass classification: `logloss`, `f1`, `accuracy` - default is `logloss` (if left "auto")
                - for regression: `rmse`, `mse`, `mae`, `r2`, `mape`, `spearman`, `pearson` - default is `rmse` (if left "auto")

            validation_strategy (dict): Dictionary with validation type. Right now train/test split and cross-validation are supported.

                Example:

                    Cross-validation example:
                    {
                        "validation_type": "kfold",
                        "k_folds": 5,
                        "shuffle": True,
                        "stratify": True,
                        "random_seed": 123
                    }

                    Train/test example:
                    {
                        "validation_type": "split",
                        "train_ratio": 0.75,
                        "shuffle": True,
                        "stratify": True
                    }

            explain_level (int): The level of explanations included to each model:

                - if `explain_level` is `0` no explanations are produced.
                - if `explain_level` is `1` the following explanations are produced: importance plot (with permutation method), for decision trees produce tree plots, for linear models save coefficients.
                - if `explain_level` is `2` the following explanations are produced: the same as `1` plus SHAP explanations.

                If left `auto` AutoML will produce explanations based on the selected `mode`.

            golden_features (boolean or int): Whether to use golden features (and how many of them should be added).
                If left `auto` AutoML will use golden features based on the selected `mode`:

                - If `mode` is "Explain", `golden_features` = False.
                - If `mode` is "Perform", `golden_features` = True.
                - If `mode` is "Compete", `golden_features` = True.

                If a `boolean` value is set, the number of Golden Features is determined automatically,
                as min(100, max(10, 0.1*number_of_input_features)).

                If `int` value is set, the number of Golden Features is set to this value.

            features_selection (boolean): Whether to perform feature selection.
                If left `auto` AutoML will do feature selection based on the selected `mode`:

                - If `mode` is "Explain", `features_selection` = False.
                - If `mode` is "Perform", `features_selection` = True.
                - If `mode` is "Compete", `features_selection` = True.

            start_random_models (int): Number of starting random models to try.
                If left `auto` AutoML will select it based on the selected `mode`:

                - If `mode` is "Explain", `start_random_models` = 1.
                - If `mode` is "Perform", `start_random_models` = 5.
                - If `mode` is "Compete", `start_random_models` = 10.

            hill_climbing_steps (int): Number of steps to perform during hill climbing.
                If left `auto` AutoML will select it based on the selected `mode`:

                - If `mode` is "Explain", `hill_climbing_steps` = 0.
                - If `mode` is "Perform", `hill_climbing_steps` = 2.
                - If `mode` is "Compete", `hill_climbing_steps` = 2.

            top_models_to_improve (int): Number of best models to improve in `hill_climbing` steps.
                If left `auto` AutoML will select it based on the selected `mode`:

                - If `mode` is "Explain", `top_models_to_improve` = 0.
                - If `mode` is "Perform", `top_models_to_improve` = 2.
                - If `mode` is "Compete", `top_models_to_improve` = 3.

            boost_on_errors (boolean): Whether a model with boost on errors from previous best model should be trained. By default available in the `Compete` mode.

            kmeans_features (boolean): Whether a model with k-means generated features should be trained. By default available in the `Compete` mode.

            mix_encoding (boolean): Whether a model with mixed encoding should be trained. Mixed encoding uses label encoding
                for categoricals with more than 25 categories, and one-hot binary encoding for the remaining categoricals. It is only applied if there are
                categorical features with cardinality smaller than 25. By default it is available in the `Compete` mode.

            max_single_prediction_time (int or float): The time limit for computing a prediction on a single sample. Use it if you need a model with fast predictions,
                for example in ML pipelines served as REST APIs. Time is in seconds. By default (`max_single_prediction_time=None`) models are not optimized for fast predictions,
                except in the `Perform` mode, where the default is `0.5` seconds.

            optuna_time_budget (int): The time in seconds that Optuna should use to tune each algorithm. It is the tuning time for a single algorithm.
                If you select two algorithms, Xgboost and CatBoost, and set `optuna_time_budget=1000`, then Xgboost will be tuned for 1000 seconds and CatBoost will be tuned for 1000 seconds.
                Moreover, the tuning is performed for each data type, for example for raw data and for data with inserted Golden Features.
                This parameter is only used when `mode="Optuna"`. If you set `mode="Optuna"` and omit this parameter, it defaults to 3600 seconds.
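
                For example, the scenario above would be configured as:

                >>> automl = AutoML(
                ...     mode="Optuna",
                ...     algorithms=["Xgboost", "CatBoost"],
                ...     optuna_time_budget=1000,
                ... )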

            optuna_init_params (dict): If you have already tuned parameters from Optuna, you can reuse them by setting this parameter.
                This parameter is only used when `mode="Optuna"`. The dict should have the structure and params as specified in the MLJAR AutoML.

            optuna_verbose (boolean): If `True`, the Optuna tuning details are displayed. Set to `True` by default.

            fairness_metric (string): Name of fairness metric that will be used for assessing fairness criteria.
                Available metrics for binary and multiclass classification:

                - `demographic_parity_difference`,
                - `demographic_parity_ratio` - default metric,
                - `equalized_odds_difference`,
                - `equalized_odds_ratio`.

                Metrics for regression:

                - `group_loss_difference`,
                - `group_loss_ratio` - default metric.


            fairness_threshold (float): The threshold value for the fairness metric.
                The optimization direction (below or above the threshold) of the fairness metric is determined automatically.

                Default values:

                - for `demographic_parity_difference` the metric value should be below 0.1,
                - for `demographic_parity_ratio` the metric value should be above 0.8,
                - for `equalized_odds_difference` the metric value should be below 0.1,
                - for `equalized_odds_ratio` the metric value should be above 0.8,
                - for `group_loss_ratio` the metric value should be above 0.8.

                For `group_loss_difference` the default threshold value can't be set because it depends on the dataset.
                If `group_loss_difference` metric is used and `fairness_threshold` is not specified manually, then an exception will be raised.

            privileged_groups (list): The list of privileged groups.

                By default, the list of privileged groups is automatically detected based on fairness metrics.
                For example, in a binary classification task, a privileged group is the one with the highest selection rate.

                Example value: `[{"sex": "Male"}]`

            underprivileged_groups (list): The list of underprivileged groups.

                By default, the list of underprivileged groups is automatically detected based on fairness metrics.
                For example, in a binary classification task, an underprivileged group is the one with the lowest selection rate.

                Example value: `[{"sex": "Female"}]`

            n_jobs (int): Number of CPU cores to be used. By default it is set to `-1`, which means using all processors.

            verbose (int): Controls the verbosity when fitting and predicting.

                Note:
                    Not implemented yet, please leave it set to `1`.

            random_state (int): Controls the randomness of the `AutoML`.


        Examples:

            Binary Classification Example:

            >>> import pandas as pd
            >>> from sklearn.model_selection import train_test_split
            >>> from sklearn.metrics import roc_auc_score
            >>> from supervised import AutoML
            >>> df = pd.read_csv(
            ...     "https://raw.githubusercontent.com/pplonski/datasets-for-start/master/adult/data.csv",
            ...     skipinitialspace=True,
            ... )
            >>> X_train, X_test, y_train, y_test = train_test_split(
            ...     df[df.columns[:-1]], df["income"], test_size=0.25
            ... )
            >>> automl = AutoML()
            >>> automl.fit(X_train, y_train)
            >>> y_pred_prob = automl.predict_proba(X_test)[:, 1]  # probability of the positive class
            >>> print(f"AUROC: {roc_auc_score(y_test, y_pred_prob):.2f}")


            Multi-Class Classification Example:

            >>> import pandas as pd
            >>> from sklearn.datasets import load_digits
            >>> from sklearn.metrics import accuracy_score
            >>> from sklearn.model_selection import train_test_split
            >>> from supervised import AutoML
            >>> digits = load_digits()
            >>> X_train, X_test, y_train, y_test = train_test_split(
            ...     digits.data, digits.target, stratify=digits.target, test_size=0.25,
            ...     random_state=123
            ... )
            >>> automl = AutoML(mode="Perform")
            >>> automl.fit(X_train, y_train)
            >>> y_pred = automl.predict(X_test)
            >>> print(f"Accuracy: {accuracy_score(y_test, y_pred):.2f}%")

            Regression Example:

            >>> import pandas as pd
            >>> from sklearn.datasets import fetch_california_housing
            >>> from sklearn.model_selection import train_test_split
            >>> from sklearn.metrics import mean_squared_error
            >>> from supervised import AutoML
            >>> housing = fetch_california_housing()
            >>> X_train, X_test, y_train, y_test = train_test_split(
            ...     pd.DataFrame(housing.data, columns=housing.feature_names),
            ...     housing.target,
            ...     test_size=0.25,
            ...     random_state=123,
            ... )
            >>> automl = AutoML(mode="Compete")
            >>> automl.fit(X_train, y_train)
            >>> print("Test R^2:", automl.score(X_test, y_test))

            Scikit-learn Pipeline Integration Example:

            >>> from imblearn.over_sampling import RandomOverSampler
            >>> from sklearn.pipeline import make_pipeline
            >>> from sklearn.datasets import make_classification
            >>> from sklearn.model_selection import train_test_split
            >>> from supervised import AutoML
            >>> X, y = make_classification()
            >>> X_train, X_test, y_train, y_test = train_test_split(X, y)
            >>> pipeline = make_pipeline(RandomOverSampler(), AutoML())
            >>> print(pipeline.fit(X_train, y_train).score(X_test, y_test))

        """
        super(AutoML, self).__init__()
        # Set user arguments
        self.mode = mode
        self.ml_task = ml_task
        self.results_path = results_path
        self.total_time_limit = total_time_limit
        self.model_time_limit = model_time_limit
        self.algorithms = algorithms
        self.train_ensemble = train_ensemble
        self.stack_models = stack_models
        self.eval_metric = eval_metric
        self.validation_strategy = validation_strategy
        self.verbose = verbose
        self.explain_level = explain_level
        self.golden_features = golden_features
        self.features_selection = features_selection
        self.start_random_models = start_random_models
        self.hill_climbing_steps = hill_climbing_steps
        self.top_models_to_improve = top_models_to_improve
        self.boost_on_errors = boost_on_errors
        self.kmeans_features = kmeans_features
        self.mix_encoding = mix_encoding
        self.max_single_prediction_time = max_single_prediction_time
        self.optuna_time_budget = optuna_time_budget
        self.optuna_init_params = optuna_init_params
        self.optuna_verbose = optuna_verbose
        self.fairness_metric = fairness_metric
        self.fairness_threshold = fairness_threshold
        self.privileged_groups = privileged_groups
        self.underprivileged_groups = underprivileged_groups
        self.n_jobs = n_jobs
        self.random_state = random_state

    def fit(
        self,
        X: Union[numpy.ndarray, pandas.DataFrame],
        y: Union[numpy.ndarray, pandas.Series],
        sample_weight: Optional[Union[numpy.ndarray, pandas.Series]] = None,
        cv: Optional[Union[Iterable, List]] = None,
        sensitive_features: Optional[
            Union[numpy.ndarray, pandas.Series, pandas.DataFrame]
        ] = None,
    ):
        """Fit the AutoML model.

        Arguments:
            X (numpy.ndarray or pandas.DataFrame): Training data

            y (numpy.ndarray or pandas.Series): Training targets

            sample_weight (numpy.ndarray or pandas.Series): Training sample weights

            cv (iterable or list): List or iterable of (train, validation) splits, where each split is a pair of index arrays.
                It is used only with custom validation (`validation_strategy={'validation_type': 'custom'}`).

            sensitive_features (pandas.Series or pandas.DataFrame): Sensitive features to learn fair models

        Returns:
            AutoML object: Returns `self`
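
        Example:
            Custom validation splits (a minimal sketch; the splits may come from any
            source that yields (train_index, validation_index) pairs):

            >>> from sklearn.model_selection import KFold
            >>> folds = list(KFold(n_splits=5, shuffle=True, random_state=42).split(X))
            >>> automl = AutoML(validation_strategy={"validation_type": "custom"})
            >>> automl.fit(X, y, cv=folds)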
        """
        # Switch matplotlib to the non-interactive Agg backend for the duration
        # of the fit, so plots can be rendered without a display.
        original_backend = matplotlib.get_backend()
        try:
            matplotlib.use("Agg")
            return self._fit(X, y, sample_weight, cv, sensitive_features)
        finally:
            # restore the user's backend, even if training failed
            matplotlib.use(original_backend)
            try:
                if "inline" in original_backend:
                    # re-enable inline plotting integration in Jupyter
                    import matplotlib_inline

                    matplotlib_inline.backend_inline._enable_matplotlib_integration()
            except Exception:
                pass


    def predict(self, X: Union[List, numpy.ndarray, pandas.DataFrame]) -> numpy.ndarray:
        """
        Computes predictions from the AutoML best model.

        Arguments:
            X (list or numpy.ndarray or pandas.DataFrame):
                Input values to make predictions on.

        Returns:
            numpy.ndarray:

            - One-dimensional array of class labels for classification.
            - One-dimensional array of predictions for regression.

        Raises:
            AutoMLException: Model has not yet been fitted.
        """
        return self._predict(X)

    def predict_proba(
        self, X: Union[List, numpy.ndarray, pandas.DataFrame]
    ) -> numpy.ndarray:
        """
        Computes class probabilities from the AutoML best model.
        This method can only be used for classification tasks.

        Arguments:
            X (list or numpy.ndarray or pandas.DataFrame):
                Input values to make predictions on.

        Returns:
            numpy.ndarray of shape (n_samples, n_classes):
                Matrix containing the class probabilities of the input samples.

        Raises:
            AutoMLException: Model has not yet been fitted.

        """
        return self._predict_proba(X)

    def predict_all(
        self, X: Union[List, numpy.ndarray, pandas.DataFrame]
    ) -> pandas.DataFrame:
        """
        Computes both class probabilities and class labels for classification tasks.
        Computes predictions for regression tasks.

        Arguments:
            X (list or numpy.ndarray or pandas.DataFrame):
                Input values to make predictions on.

        Returns:
            pandas.DataFrame:
                DataFrame of shape (n_samples, n_classes + 1) containing both class probabilities and class
                labels of the input samples for classification tasks.
                DataFrame with predictions for regression tasks.

        Raises:
            AutoMLException: Model has not yet been fitted.

        """
        return self._predict_all(X)

    def score(
        self,
        X: Union[numpy.ndarray, pandas.DataFrame],
        y: Optional[Union[numpy.ndarray, pandas.Series]] = None,
        sample_weight: Optional[Union[numpy.ndarray, pandas.Series]] = None,
    ) -> float:
        """Calculates a goodness of `fit` for an AutoML instance.

        Arguments:
            X (numpy.ndarray or pandas.DataFrame):
                Test values to make predictions on.

            y (numpy.ndarray or pandas.Series):
                True labels for X.

            sample_weight (numpy.ndarray or pandas.Series):
                Sample weights.
        Returns:
            float: Returns a goodness of fit measure (higher is better):

            - For classification tasks: returns the mean accuracy on the given test data and labels.
            - For regression tasks: returns the R^2 (coefficient of determination) on the given test data and labels.
        """
        return self._score(X, y, sample_weight)

    def report(self, width=900, height=1200):
        return self._report(width, height)

    def need_retrain(
        self,
        X: Union[numpy.ndarray, pandas.DataFrame],
        y: Union[numpy.ndarray, pandas.Series],
        sample_weight: Optional[Union[numpy.ndarray, pandas.Series]] = None,
        decrease: float = 0.1,
    ) -> bool:
        """Decides about model retraining based on new data.

        Arguments:
            X (numpy.ndarray or pandas.DataFrame):
                New data.

            y (numpy.ndarray or pandas.Series):
                True labels for X.

            sample_weight (numpy.ndarray or pandas.Series):
                Sample weights.

            decrease (float): The ratio of performance change used as a threshold for the retraining decision.
                By default, it is set to `0.1`, which means that if the performance of the AutoML decreases by 10%
                on new data then there is a need to retrain. This value should be set depending on your project needs.
                Sometimes 10% is enough, but for some projects it can be even lower than 1%.

        Returns:
            boolean: Decides if there is a need to retrain the AutoML.
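
        Example:
            A periodic check on fresh data (illustrative; `X_new` and `y_new` stand for newly collected samples):

            >>> if automl.need_retrain(X_new, y_new, decrease=0.05):
            ...     automl = AutoML(results_path="AutoML_retrained")
            ...     automl.fit(X_new, y_new)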
        """
        return self._need_retrain(X, y, sample_weight, decrease)

```

--------------------------------------------------------------------------------
/tests/data/Titanic/test_with_Survived.csv:
--------------------------------------------------------------------------------

```
PassengerId,Survived,Pclass,Name,Sex,Age,SibSp,Parch,Ticket,Fare,Cabin,Embarked
892,0,3,"Kelly, Mr. James",male,34.5,0,0,330911,7.8292,,Q
893,1,3,"Wilkes, Mrs. James (Ellen Needs)",female,47,1,0,363272,7,,S
894,0,2,"Myles, Mr. Thomas Francis",male,62,0,0,240276,9.6875,,Q
895,0,3,"Wirz, Mr. Albert",male,27,0,0,315154,8.6625,,S
896,1,3,"Hirvonen, Mrs. Alexander (Helga E Lindqvist)",female,22,1,1,3101298,12.2875,,S
897,1,3,"Svensson, Mr. Johan Cervin",male,14,0,0,7538,9.225,,S
898,0,3,"Connolly, Miss. Kate",female,30,0,0,330972,7.6292,,Q
899,1,2,"Caldwell, Mr. Albert Francis",male,26,1,1,248738,29,,S
900,1,3,"Abrahim, Mrs. Joseph (Sophie Halaut Easu)",female,18,0,0,2657,7.2292,,C
901,0,3,"Davies, Mr. John Samuel",male,21,2,0,A/4 48871,24.15,,S
902,0,3,"Ilieff, Mr. Ylio",male,,0,0,349220,7.8958,,S
903,0,1,"Jones, Mr. Charles Cresson",male,46,0,0,694,26,,S
904,1,1,"Snyder, Mrs. John Pillsbury (Nelle Stevenson)",female,23,1,0,21228,82.2667,B45,S
905,0,2,"Howard, Mr. Benjamin",male,63,1,0,24065,26,,S
906,1,1,"Chaffee, Mrs. Herbert Fuller (Carrie Constance Toogood)",female,47,1,0,W.E.P. 5734,61.175,E31,S
907,1,2,"del Carlo, Mrs. Sebastiano (Argenia Genovesi)",female,24,1,0,SC/PARIS 2167,27.7208,,C
908,0,2,"Keane, Mr. Daniel",male,35,0,0,233734,12.35,,Q
909,0,3,"Assaf, Mr. Gerios",male,21,0,0,2692,7.225,,C
910,0,3,"Ilmakangas, Miss. Ida Livija",female,27,1,0,STON/O2. 3101270,7.925,,S
911,1,3,"Assaf Khalil, Mrs. Mariana (Miriam"")""",female,45,0,0,2696,7.225,,C
912,0,1,"Rothschild, Mr. Martin",male,55,1,0,PC 17603,59.4,,C
913,1,3,"Olsen, Master. Artur Karl",male,9,0,1,C 17368,3.1708,,S
914,1,1,"Flegenheim, Mrs. Alfred (Antoinette)",female,,0,0,PC 17598,31.6833,,S
915,1,1,"Williams, Mr. Richard Norris II",male,21,0,1,PC 17597,61.3792,,C
916,1,1,"Ryerson, Mrs. Arthur Larned (Emily Maria Borie)",female,48,1,3,PC 17608,262.375,B57 B59 B63 B66,C
917,0,3,"Robins, Mr. Alexander A",male,50,1,0,A/5. 3337,14.5,,S
918,1,1,"Ostby, Miss. Helene Ragnhild",female,22,0,1,113509,61.9792,B36,C
919,0,3,"Daher, Mr. Shedid",male,22.5,0,0,2698,7.225,,C
920,0,1,"Brady, Mr. John Bertram",male,41,0,0,113054,30.5,A21,S
921,0,3,"Samaan, Mr. Elias",male,,2,0,2662,21.6792,,C
922,0,2,"Louch, Mr. Charles Alexander",male,50,1,0,SC/AH 3085,26,,S
923,0,2,"Jefferys, Mr. Clifford Thomas",male,24,2,0,C.A. 31029,31.5,,S
924,1,3,"Dean, Mrs. Bertram (Eva Georgetta Light)",female,33,1,2,C.A. 2315,20.575,,S
925,0,3,"Johnston, Mrs. Andrew G (Elizabeth Lily"" Watson)""",female,,1,2,W./C. 6607,23.45,,S
926,1,1,"Mock, Mr. Philipp Edmund",male,30,1,0,13236,57.75,C78,C
927,0,3,"Katavelas, Mr. Vassilios (Catavelas Vassilios"")""",male,18.5,0,0,2682,7.2292,,C
928,1,3,"Roth, Miss. Sarah A",female,,0,0,342712,8.05,,S
929,0,3,"Cacic, Miss. Manda",female,21,0,0,315087,8.6625,,S
930,1,3,"Sap, Mr. Julius",male,25,0,0,345768,9.5,,S
931,1,3,"Hee, Mr. Ling",male,,0,0,1601,56.4958,,S
932,1,3,"Karun, Mr. Franz",male,39,0,1,349256,13.4167,,C
933,0,1,"Franklin, Mr. Thomas Parham",male,,0,0,113778,26.55,D34,S
934,0,3,"Goldsmith, Mr. Nathan",male,41,0,0,SOTON/O.Q. 3101263,7.85,,S
935,0,2,"Corbett, Mrs. Walter H (Irene Colvin)",female,30,0,0,237249,13,,S
936,1,1,"Kimball, Mrs. Edwin Nelson Jr (Gertrude Parsons)",female,45,1,0,11753,52.5542,D19,S
937,0,3,"Peltomaki, Mr. Nikolai Johannes",male,25,0,0,STON/O 2. 3101291,7.925,,S
938,1,1,"Chevre, Mr. Paul Romaine",male,45,0,0,PC 17594,29.7,A9,C
939,0,3,"Shaughnessy, Mr. Patrick",male,,0,0,370374,7.75,,Q
940,1,1,"Bucknell, Mrs. William Robert (Emma Eliza Ward)",female,60,0,0,11813,76.2917,D15,C
941,1,3,"Coutts, Mrs. William (Winnie Minnie"" Treanor)""",female,36,0,2,C.A. 37671,15.9,,S
942,0,1,"Smith, Mr. Lucien Philip",male,24,1,0,13695,60,C31,S
943,0,2,"Pulbaum, Mr. Franz",male,27,0,0,SC/PARIS 2168,15.0333,,C
944,1,2,"Hocking, Miss. Ellen Nellie""""",female,20,2,1,29105,23,,S
945,1,1,"Fortune, Miss. Ethel Flora",female,28,3,2,19950,263,C23 C25 C27,S
946,0,2,"Mangiavacchi, Mr. Serafino Emilio",male,,0,0,SC/A.3 2861,15.5792,,C
947,0,3,"Rice, Master. Albert",male,10,4,1,382652,29.125,,Q
948,0,3,"Cor, Mr. Bartol",male,35,0,0,349230,7.8958,,S
949,1,3,"Abelseth, Mr. Olaus Jorgensen",male,25,0,0,348122,7.65,F G63,S
950,0,3,"Davison, Mr. Thomas Henry",male,,1,0,386525,16.1,,S
951,1,1,"Chaudanson, Miss. Victorine",female,36,0,0,PC 17608,262.375,B61,C
952,0,3,"Dika, Mr. Mirko",male,17,0,0,349232,7.8958,,S
953,0,2,"McCrae, Mr. Arthur Gordon",male,32,0,0,237216,13.5,,S
954,0,3,"Bjorklund, Mr. Ernst Herbert",male,18,0,0,347090,7.75,,S
955,1,3,"Bradley, Miss. Bridget Delia",female,22,0,0,334914,7.725,,Q
956,1,1,"Ryerson, Master. John Borie",male,13,2,2,PC 17608,262.375,B57 B59 B63 B66,C
957,0,2,"Corey, Mrs. Percy C (Mary Phyllis Elizabeth Miller)",female,,0,0,F.C.C. 13534,21,,S
958,0,3,"Burns, Miss. Mary Delia",female,18,0,0,330963,7.8792,,Q
959,0,1,"Moore, Mr. Clarence Bloomfield",male,47,0,0,113796,42.4,,S
960,1,1,"Tucker, Mr. Gilbert Milligan Jr",male,31,0,0,2543,28.5375,C53,C
961,1,1,"Fortune, Mrs. Mark (Mary McDougald)",female,60,1,4,19950,263,C23 C25 C27,S
962,1,3,"Mulvihill, Miss. Bertha E",female,24,0,0,382653,7.75,,Q
963,0,3,"Minkoff, Mr. Lazar",male,21,0,0,349211,7.8958,,S
964,0,3,"Nieminen, Miss. Manta Josefina",female,29,0,0,3101297,7.925,,S
965,0,1,"Ovies y Rodriguez, Mr. Servando",male,28.5,0,0,PC 17562,27.7208,D43,C
966,1,1,"Geiger, Miss. Amalie",female,35,0,0,113503,211.5,C130,C
967,0,1,"Keeping, Mr. Edwin",male,32.5,0,0,113503,211.5,C132,C
968,0,3,"Miles, Mr. Frank",male,,0,0,359306,8.05,,S
969,1,1,"Cornell, Mrs. Robert Clifford (Malvina Helen Lamson)",female,55,2,0,11770,25.7,C101,S
970,0,2,"Aldworth, Mr. Charles Augustus",male,30,0,0,248744,13,,S
971,0,3,"Doyle, Miss. Elizabeth",female,24,0,0,368702,7.75,,Q
972,0,3,"Boulos, Master. Akar",male,6,1,1,2678,15.2458,,C
973,0,1,"Straus, Mr. Isidor",male,67,1,0,PC 17483,221.7792,C55 C57,S
974,0,1,"Case, Mr. Howard Brown",male,49,0,0,19924,26,,S
975,0,3,"Demetri, Mr. Marinko",male,,0,0,349238,7.8958,,S
976,0,2,"Lamb, Mr. John Joseph",male,,0,0,240261,10.7083,,Q
977,0,3,"Khalil, Mr. Betros",male,,1,0,2660,14.4542,,C
978,0,3,"Barry, Miss. Julia",female,27,0,0,330844,7.8792,,Q
979,1,3,"Badman, Miss. Emily Louisa",female,18,0,0,A/4 31416,8.05,,S
980,0,3,"O'Donoghue, Ms. Bridget",female,,0,0,364856,7.75,,Q
981,1,2,"Wells, Master. Ralph Lester",male,2,1,1,29103,23,,S
982,1,3,"Dyker, Mrs. Adolf Fredrik (Anna Elisabeth Judith Andersson)",female,22,1,0,347072,13.9,,S
983,0,3,"Pedersen, Mr. Olaf",male,,0,0,345498,7.775,,S
984,1,1,"Davidson, Mrs. Thornton (Orian Hays)",female,27,1,2,F.C. 12750,52,B71,S
985,0,3,"Guest, Mr. Robert",male,,0,0,376563,8.05,,S
986,0,1,"Birnbaum, Mr. Jakob",male,25,0,0,13905,26,,C
987,1,3,"Tenglin, Mr. Gunnar Isidor",male,25,0,0,350033,7.7958,,S
988,1,1,"Cavendish, Mrs. Tyrell William (Julia Florence Siegel)",female,76,1,0,19877,78.85,C46,S
989,0,3,"Makinen, Mr. Kalle Edvard",male,29,0,0,STON/O 2. 3101268,7.925,,S
990,0,3,"Braf, Miss. Elin Ester Maria",female,20,0,0,347471,7.8542,,S
991,0,3,"Nancarrow, Mr. William Henry",male,33,0,0,A./5. 3338,8.05,,S
992,1,1,"Stengel, Mrs. Charles Emil Henry (Annie May Morris)",female,43,1,0,11778,55.4417,C116,C
993,0,2,"Weisz, Mr. Leopold",male,27,1,0,228414,26,,S
994,0,3,"Foley, Mr. William",male,,0,0,365235,7.75,,Q
995,1,3,"Johansson Palmquist, Mr. Oskar Leander",male,26,0,0,347070,7.775,,S
996,1,3,"Thomas, Mrs. Alexander (Thamine Thelma"")""",female,16,1,1,2625,8.5167,,C
997,0,3,"Holthen, Mr. Johan Martin",male,28,0,0,C 4001,22.525,,S
998,1,3,"Buckley, Mr. Daniel",male,21,0,0,330920,7.8208,,Q
999,1,3,"Ryan, Mr. Edward",male,,0,0,383162,7.75,,Q
1000,0,3,"Willer, Mr. Aaron (Abi Weller"")""",male,,0,0,3410,8.7125,,S
1001,0,2,"Swane, Mr. George",male,18.5,0,0,248734,13,F,S
1002,0,2,"Stanton, Mr. Samuel Ward",male,41,0,0,237734,15.0458,,C
1003,1,3,"Shine, Miss. Ellen Natalia",female,,0,0,330968,7.7792,,Q
1004,0,1,"Evans, Miss. Edith Corse",female,36,0,0,PC 17531,31.6792,A29,C
1005,0,3,"Buckley, Miss. Katherine",female,18.5,0,0,329944,7.2833,,Q
1006,0,1,"Straus, Mrs. Isidor (Rosalie Ida Blun)",female,63,1,0,PC 17483,221.7792,C55 C57,S
1007,0,3,"Chronopoulos, Mr. Demetrios",male,18,1,0,2680,14.4542,,C
1008,0,3,"Thomas, Mr. John",male,,0,0,2681,6.4375,,C
1009,1,3,"Sandstrom, Miss. Beatrice Irene",female,1,1,1,PP 9549,16.7,G6,S
1010,0,1,"Beattie, Mr. Thomson",male,36,0,0,13050,75.2417,C6,C
1011,0,2,"Chapman, Mrs. John Henry (Sara Elizabeth Lawry)",female,29,1,0,SC/AH 29037,26,,S
1012,1,2,"Watt, Miss. Bertha J",female,12,0,0,C.A. 33595,15.75,,S
1013,0,3,"Kiernan, Mr. John",male,,1,0,367227,7.75,,Q
1014,1,1,"Schabert, Mrs. Paul (Emma Mock)",female,35,1,0,13236,57.75,C28,C
1015,0,3,"Carver, Mr. Alfred John",male,28,0,0,392095,7.25,,S
1016,1,3,"Kennedy, Mr. John",male,,0,0,368783,7.75,,Q
1017,1,3,"Cribb, Miss. Laura Alice",female,17,0,1,371362,16.1,,S
1018,0,3,"Brobeck, Mr. Karl Rudolf",male,22,0,0,350045,7.7958,,S
1019,1,3,"McCoy, Miss. Alicia",female,,2,0,367226,23.25,,Q
1020,0,2,"Bowenur, Mr. Solomon",male,42,0,0,211535,13,,S
1021,0,3,"Petersen, Mr. Marius",male,24,0,0,342441,8.05,,S
1022,0,3,"Spinner, Mr. Henry John",male,32,0,0,STON/OQ. 369943,8.05,,S
1023,1,1,"Gracie, Col. Archibald IV",male,53,0,0,113780,28.5,C51,C
1024,0,3,"Lefebre, Mrs. Frank (Frances)",female,,0,4,4133,25.4667,,S
1025,0,3,"Thomas, Mr. Charles P",male,,1,0,2621,6.4375,,C
1026,0,3,"Dintcheff, Mr. Valtcho",male,43,0,0,349226,7.8958,,S
1027,0,3,"Carlsson, Mr. Carl Robert",male,24,0,0,350409,7.8542,,S
1028,0,3,"Zakarian, Mr. Mapriededer",male,26.5,0,0,2656,7.225,,C
1029,0,2,"Schmidt, Mr. August",male,26,0,0,248659,13,,S
1030,1,3,"Drapkin, Miss. Jennie",female,23,0,0,SOTON/OQ 392083,8.05,,S
1031,0,3,"Goodwin, Mr. Charles Frederick",male,40,1,6,CA 2144,46.9,,S
1032,0,3,"Goodwin, Miss. Jessie Allis",female,10,5,2,CA 2144,46.9,,S
1033,1,1,"Daniels, Miss. Sarah",female,33,0,0,113781,151.55,,S
1034,0,1,"Ryerson, Mr. Arthur Larned",male,61,1,3,PC 17608,262.375,B57 B59 B63 B66,C
1035,0,2,"Beauchamp, Mr. Henry James",male,28,0,0,244358,26,,S
1036,0,1,"Lindeberg-Lind, Mr. Erik Gustaf (Mr Edward Lingrey"")""",male,42,0,0,17475,26.55,,S
1037,0,3,"Vander Planke, Mr. Julius",male,31,3,0,345763,18,,S
1038,0,1,"Hilliard, Mr. Herbert Henry",male,,0,0,17463,51.8625,E46,S
1039,0,3,"Davies, Mr. Evan",male,22,0,0,SC/A4 23568,8.05,,S
1040,0,1,"Crafton, Mr. John Bertram",male,,0,0,113791,26.55,,S
1041,0,2,"Lahtinen, Rev. William",male,30,1,1,250651,26,,S
1042,1,1,"Earnshaw, Mrs. Boulton (Olive Potter)",female,23,0,1,11767,83.1583,C54,C
1043,0,3,"Matinoff, Mr. Nicola",male,,0,0,349255,7.8958,,C
1044,0,3,"Storey, Mr. Thomas",male,60.5,0,0,3701,,,S
1045,0,3,"Klasen, Mrs. (Hulda Kristina Eugenia Lofqvist)",female,36,0,2,350405,12.1833,,S
1046,0,3,"Asplund, Master. Filip Oscar",male,13,4,2,347077,31.3875,,S
1047,1,3,"Duquemin, Mr. Joseph",male,24,0,0,S.O./P.P. 752,7.55,,S
1048,1,1,"Bird, Miss. Ellen",female,29,0,0,PC 17483,221.7792,C97,S
1049,1,3,"Lundin, Miss. Olga Elida",female,23,0,0,347469,7.8542,,S
1050,0,1,"Borebank, Mr. John James",male,42,0,0,110489,26.55,D22,S
1051,0,3,"Peacock, Mrs. Benjamin (Edith Nile)",female,26,0,2,SOTON/O.Q. 3101315,13.775,,S
1052,1,3,"Smyth, Miss. Julia",female,,0,0,335432,7.7333,,Q
1053,1,3,"Touma, Master. Georges Youssef",male,7,1,1,2650,15.2458,,C
1054,1,2,"Wright, Miss. Marion",female,26,0,0,220844,13.5,,S
1055,0,3,"Pearce, Mr. Ernest",male,,0,0,343271,7,,S
1056,0,2,"Peruschitz, Rev. Joseph Maria",male,41,0,0,237393,13,,S
1057,1,3,"Kink-Heilmann, Mrs. Anton (Luise Heilmann)",female,26,1,1,315153,22.025,,S
1058,0,1,"Brandeis, Mr. Emil",male,48,0,0,PC 17591,50.4958,B10,C
1059,0,3,"Ford, Mr. Edward Watson",male,18,2,2,W./C. 6608,34.375,,S
1060,1,1,"Cassebeer, Mrs. Henry Arthur Jr (Eleanor Genevieve Fosdick)",female,,0,0,17770,27.7208,,C
1061,1,3,"Hellstrom, Miss. Hilda Maria",female,22,0,0,7548,8.9625,,S
1062,0,3,"Lithman, Mr. Simon",male,,0,0,S.O./P.P. 251,7.55,,S
1063,0,3,"Zakarian, Mr. Ortin",male,27,0,0,2670,7.225,,C
1064,0,3,"Dyker, Mr. Adolf Fredrik",male,23,1,0,347072,13.9,,S
1065,0,3,"Torfa, Mr. Assad",male,,0,0,2673,7.2292,,C
1066,0,3,"Asplund, Mr. Carl Oscar Vilhelm Gustafsson",male,40,1,5,347077,31.3875,,S
1067,1,2,"Brown, Miss. Edith Eileen",female,15,0,2,29750,39,,S
1068,1,2,"Sincock, Miss. Maude",female,20,0,0,C.A. 33112,36.75,,S
1069,1,1,"Stengel, Mr. Charles Emil Henry",male,54,1,0,11778,55.4417,C116,C
1070,1,2,"Becker, Mrs. Allen Oliver (Nellie E Baumgardner)",female,36,0,3,230136,39,F4,S
1071,1,1,"Compton, Mrs. Alexander Taylor (Mary Eliza Ingersoll)",female,64,0,2,PC 17756,83.1583,E45,C
1072,0,2,"McCrie, Mr. James Matthew",male,30,0,0,233478,13,,S
1073,0,1,"Compton, Mr. Alexander Taylor Jr",male,37,1,1,PC 17756,83.1583,E52,C
1074,1,1,"Marvin, Mrs. Daniel Warner (Mary Graham Carmichael Farquarson)",female,18,1,0,113773,53.1,D30,S
1075,0,3,"Lane, Mr. Patrick",male,,0,0,7935,7.75,,Q
1076,1,1,"Douglas, Mrs. Frederick Charles (Mary Helene Baxter)",female,27,1,1,PC 17558,247.5208,B58 B60,C
1077,0,2,"Maybery, Mr. Frank Hubert",male,40,0,0,239059,16,,S
1078,1,2,"Phillips, Miss. Alice Frances Louisa",female,21,0,1,S.O./P.P. 2,21,,S
1079,0,3,"Davies, Mr. Joseph",male,17,2,0,A/4 48873,8.05,,S
1080,0,3,"Sage, Miss. Ada",female,,8,2,CA. 2343,69.55,,S
1081,0,2,"Veal, Mr. James",male,40,0,0,28221,13,,S
1082,0,2,"Angle, Mr. William A",male,34,1,0,226875,26,,S
1083,1,1,"Salomon, Mr. Abraham L",male,,0,0,111163,26,,S
1084,0,3,"van Billiard, Master. Walter John",male,11.5,1,1,A/5. 851,14.5,,S
1085,0,2,"Lingane, Mr. John",male,61,0,0,235509,12.35,,Q
1086,1,2,"Drew, Master. Marshall Brines",male,8,0,2,28220,32.5,,S
1087,0,3,"Karlsson, Mr. Julius Konrad Eugen",male,33,0,0,347465,7.8542,,S
1088,1,1,"Spedden, Master. Robert Douglas",male,6,0,2,16966,134.5,E34,C
1089,1,3,"Nilsson, Miss. Berta Olivia",female,18,0,0,347066,7.775,,S
1090,0,2,"Baimbrigge, Mr. Charles Robert",male,23,0,0,C.A. 31030,10.5,,S
1091,0,3,"Rasmussen, Mrs. (Lena Jacobsen Solvang)",female,,0,0,65305,8.1125,,S
1092,1,3,"Murphy, Miss. Nora",female,,0,0,36568,15.5,,Q
1093,0,3,"Danbom, Master. Gilbert Sigvard Emanuel",male,0.33,0,2,347080,14.4,,S
1094,0,1,"Astor, Col. John Jacob",male,47,1,0,PC 17757,227.525,C62 C64,C
1095,1,2,"Quick, Miss. Winifred Vera",female,8,1,1,26360,26,,S
1096,0,2,"Andrew, Mr. Frank Thomas",male,25,0,0,C.A. 34050,10.5,,S
1097,1,1,"Omont, Mr. Alfred Fernand",male,,0,0,F.C. 12998,25.7417,,C
1098,0,3,"McGowan, Miss. Katherine",female,35,0,0,9232,7.75,,Q
1099,1,2,"Collett, Mr. Sidney C Stuart",male,24,0,0,28034,10.5,,S
1100,1,1,"Rosenbaum, Miss. Edith Louise",female,33,0,0,PC 17613,27.7208,A11,C
1101,0,3,"Delalic, Mr. Redjo",male,25,0,0,349250,7.8958,,S
1102,0,3,"Andersen, Mr. Albert Karvin",male,32,0,0,C 4001,22.525,,S
1103,1,3,"Finoli, Mr. Luigi",male,,0,0,SOTON/O.Q. 3101308,7.05,,S
1104,0,2,"Deacon, Mr. Percy William",male,17,0,0,S.O.C. 14879,73.5,,S
1105,0,2,"Howard, Mrs. Benjamin (Ellen Truelove Arman)",female,60,1,0,24065,26,,S
1106,0,3,"Andersson, Miss. Ida Augusta Margareta",female,38,4,2,347091,7.775,,S
1107,0,1,"Head, Mr. Christopher",male,42,0,0,113038,42.5,B11,S
1108,0,3,"Mahon, Miss. Bridget Delia",female,,0,0,330924,7.8792,,Q
1109,0,1,"Wick, Mr. George Dennick",male,57,1,1,36928,164.8667,,S
1110,1,1,"Widener, Mrs. George Dunton (Eleanor Elkins)",female,50,1,1,113503,211.5,C80,C
1111,0,3,"Thomson, Mr. Alexander Morrison",male,,0,0,32302,8.05,,S
1112,1,2,"Duran y More, Miss. Florentina",female,30,1,0,SC/PARIS 2148,13.8583,,C
1113,0,3,"Reynolds, Mr. Harold J",male,21,0,0,342684,8.05,,S
1114,1,2,"Cook, Mrs. (Selena Rogers)",female,22,0,0,W./C. 14266,10.5,F33,S
1115,1,3,"Karlsson, Mr. Einar Gervasius",male,21,0,0,350053,7.7958,,S
1116,1,1,"Candee, Mrs. Edward (Helen Churchill Hungerford)",female,53,0,0,PC 17606,27.4458,,C
1117,1,3,"Moubarek, Mrs. George (Omine Amenia"" Alexander)""",female,,0,2,2661,15.2458,,C
1118,1,3,"Asplund, Mr. Johan Charles",male,23,0,0,350054,7.7958,,S
1119,0,3,"McNeill, Miss. Bridget",female,,0,0,370368,7.75,,Q
1120,0,3,"Everett, Mr. Thomas James",male,40.5,0,0,C.A. 6212,15.1,,S
1121,0,2,"Hocking, Mr. Samuel James Metcalfe",male,36,0,0,242963,13,,S
1122,0,2,"Sweet, Mr. George Frederick",male,14,0,0,220845,65,,S
1123,1,1,"Willard, Miss. Constance",female,21,0,0,113795,26.55,,S
1124,0,3,"Wiklund, Mr. Karl Johan",male,21,1,0,3101266,6.4958,,S
1125,0,3,"Linehan, Mr. Michael",male,,0,0,330971,7.8792,,Q
1126,0,1,"Cumings, Mr. John Bradley",male,39,1,0,PC 17599,71.2833,C85,C
1127,0,3,"Vendel, Mr. Olof Edvin",male,20,0,0,350416,7.8542,,S
1128,0,1,"Warren, Mr. Frank Manley",male,64,1,0,110813,75.25,D37,C
1129,0,3,"Baccos, Mr. Raffull",male,20,0,0,2679,7.225,,C
1130,0,2,"Hiltunen, Miss. Marta",female,18,1,1,250650,13,,S
1131,1,1,"Douglas, Mrs. Walter Donald (Mahala Dutton)",female,48,1,0,PC 17761,106.425,C86,C
1132,1,1,"Lindstrom, Mrs. Carl Johan (Sigrid Posse)",female,55,0,0,112377,27.7208,,C
1133,1,2,"Christy, Mrs. (Alice Frances)",female,45,0,2,237789,30,,S
1134,1,1,"Spedden, Mr. Frederic Oakley",male,45,1,1,16966,134.5,E34,C
1135,1,3,"Hyman, Mr. Abraham",male,,0,0,3470,7.8875,,S
1136,0,3,"Johnston, Master. William Arthur Willie""""",male,,1,2,W./C. 6607,23.45,,S
1137,0,1,"Kenyon, Mr. Frederick R",male,41,1,0,17464,51.8625,D21,S
1138,0,2,"Karnes, Mrs. J Frank (Claire Bennett)",female,22,0,0,F.C.C. 13534,21,,S
1139,0,2,"Drew, Mr. James Vivian",male,42,1,1,28220,32.5,,S
1140,1,2,"Hold, Mrs. Stephen (Annie Margaret Hill)",female,29,1,0,26707,26,,S
1141,0,3,"Khalil, Mrs. Betros (Zahie Maria"" Elias)""",female,,1,0,2660,14.4542,,C
1142,1,2,"West, Miss. Barbara J",female,0.92,1,2,C.A. 34651,27.75,,S
1143,1,3,"Abrahamsson, Mr. Abraham August Johannes",male,20,0,0,SOTON/O2 3101284,7.925,,S
1144,0,1,"Clark, Mr. Walter Miller",male,27,1,0,13508,136.7792,C89,C
1145,0,3,"Salander, Mr. Karl Johan",male,24,0,0,7266,9.325,,S
1146,0,3,"Wenzel, Mr. Linhart",male,32.5,0,0,345775,9.5,,S
1147,0,3,"MacKay, Mr. George William",male,,0,0,C.A. 42795,7.55,,S
1148,0,3,"Mahon, Mr. John",male,,0,0,AQ/4 3130,7.75,,Q
1149,0,3,"Niklasson, Mr. Samuel",male,28,0,0,363611,8.05,,S
1150,1,2,"Bentham, Miss. Lilian W",female,19,0,0,28404,13,,S
1151,1,3,"Midtsjo, Mr. Karl Albert",male,21,0,0,345501,7.775,,S
1152,1,3,"de Messemaeker, Mr. Guillaume Joseph",male,36.5,1,0,345572,17.4,,S
1153,0,3,"Nilsson, Mr. August Ferdinand",male,21,0,0,350410,7.8542,,S
1154,1,2,"Wells, Mrs. Arthur Henry (Addie"" Dart Trevaskis)""",female,29,0,2,29103,23,,S
1155,0,3,"Klasen, Miss. Gertrud Emilia",female,1,1,1,350405,12.1833,,S
1156,1,2,"Portaluppi, Mr. Emilio Ilario Giuseppe",male,30,0,0,C.A. 34644,12.7375,,C
1157,0,3,"Lyntakoff, Mr. Stanko",male,,0,0,349235,7.8958,,S
1158,0,1,"Chisholm, Mr. Roderick Robert Crispin",male,,0,0,112051,0,,S
1159,0,3,"Warren, Mr. Charles William",male,,0,0,C.A. 49867,7.55,,S
1160,1,3,"Howard, Miss. May Elizabeth",female,,0,0,A. 2. 39186,8.05,,S
1161,0,3,"Pokrnic, Mr. Mate",male,17,0,0,315095,8.6625,,S
1162,0,1,"McCaffry, Mr. Thomas Francis",male,46,0,0,13050,75.2417,C6,C
1163,0,3,"Fox, Mr. Patrick",male,,0,0,368573,7.75,,Q
1164,1,1,"Clark, Mrs. Walter Miller (Virginia McDowell)",female,26,1,0,13508,136.7792,C89,C
1165,0,3,"Lennon, Miss. Mary",female,,1,0,370371,15.5,,Q
1166,0,3,"Saade, Mr. Jean Nassr",male,,0,0,2676,7.225,,C
1167,1,2,"Bryhl, Miss. Dagmar Jenny Ingeborg ",female,20,1,0,236853,26,,S
1168,0,2,"Parker, Mr. Clifford Richard",male,28,0,0,SC 14888,10.5,,S
1169,0,2,"Faunthorpe, Mr. Harry",male,40,1,0,2926,26,,S
1170,0,2,"Ware, Mr. John James",male,30,1,0,CA 31352,21,,S
1171,1,2,"Oxenham, Mr. Percy Thomas",male,22,0,0,W./C. 14260,10.5,,S
1172,0,3,"Oreskovic, Miss. Jelka",female,23,0,0,315085,8.6625,,S
1173,0,3,"Peacock, Master. Alfred Edward",male,0.75,1,1,SOTON/O.Q. 3101315,13.775,,S
1174,0,3,"Fleming, Miss. Honora",female,,0,0,364859,7.75,,Q
1175,1,3,"Touma, Miss. Maria Youssef",female,9,1,1,2650,15.2458,,C
1176,0,3,"Rosblom, Miss. Salli Helena",female,2,1,1,370129,20.2125,,S
1177,0,3,"Dennis, Mr. William",male,36,0,0,A/5 21175,7.25,,S
1178,0,3,"Franklin, Mr. Charles (Charles Fardon)",male,,0,0,SOTON/O.Q. 3101314,7.25,,S
1179,1,1,"Snyder, Mr. John Pillsbury",male,24,1,0,21228,82.2667,B45,S
1180,0,3,"Mardirosian, Mr. Sarkis",male,,0,0,2655,7.2292,F E46,C
1181,0,3,"Ford, Mr. Arthur",male,,0,0,A/5 1478,8.05,,S
1182,1,1,"Rheims, Mr. George Alexander Lucien",male,,0,0,PC 17607,39.6,,S
1183,1,3,"Daly, Miss. Margaret Marcella Maggie""""",female,30,0,0,382650,6.95,,Q
1184,0,3,"Nasr, Mr. Mustafa",male,,0,0,2652,7.2292,,C
1185,1,1,"Dodge, Dr. Washington",male,53,1,1,33638,81.8583,A34,S
1186,0,3,"Wittevrongel, Mr. Camille",male,36,0,0,345771,9.5,,S
1187,0,3,"Angheloff, Mr. Minko",male,26,0,0,349202,7.8958,,S
1188,1,2,"Laroche, Miss. Louise",female,1,1,2,SC/Paris 2123,41.5792,,C
1189,0,3,"Samaan, Mr. Hanna",male,,2,0,2662,21.6792,,C
1190,0,1,"Loring, Mr. Joseph Holland",male,30,0,0,113801,45.5,,S
1191,0,3,"Johansson, Mr. Nils",male,29,0,0,347467,7.8542,,S
1192,1,3,"Olsson, Mr. Oscar Wilhelm",male,32,0,0,347079,7.775,,S
1193,0,2,"Malachard, Mr. Noel",male,,0,0,237735,15.0458,D,C
1194,0,2,"Phillips, Mr. Escott Robert",male,43,0,1,S.O./P.P. 2,21,,S
1195,0,3,"Pokrnic, Mr. Tome",male,24,0,0,315092,8.6625,,S
1196,1,3,"McCarthy, Miss. Catherine Katie""""",female,,0,0,383123,7.75,,Q
1197,1,1,"Crosby, Mrs. Edward Gifford (Catherine Elizabeth Halstead)",female,64,1,1,112901,26.55,B26,S
1198,0,1,"Allison, Mr. Hudson Joshua Creighton",male,30,1,2,113781,151.55,C22 C26,S
1199,1,3,"Aks, Master. Philip Frank",male,0.83,0,1,392091,9.35,,S
1200,0,1,"Hays, Mr. Charles Melville",male,55,1,1,12749,93.5,B69,S
1201,1,3,"Hansen, Mrs. Claus Peter (Jennie L Howard)",female,45,1,0,350026,14.1083,,S
1202,0,3,"Cacic, Mr. Jego Grga",male,18,0,0,315091,8.6625,,S
1203,1,3,"Vartanian, Mr. David",male,22,0,0,2658,7.225,,C
1204,0,3,"Sadowitz, Mr. Harry",male,,0,0,LP 1588,7.575,,S
1205,0,3,"Carr, Miss. Jeannie",female,37,0,0,368364,7.75,,Q
1206,1,1,"White, Mrs. John Stuart (Ella Holmes)",female,55,0,0,PC 17760,135.6333,C32,C
1207,0,3,"Hagardon, Miss. Kate",female,17,0,0,AQ/3. 30631,7.7333,,Q
1208,0,1,"Spencer, Mr. William Augustus",male,57,1,0,PC 17569,146.5208,B78,C
1209,0,2,"Rogers, Mr. Reginald Harry",male,19,0,0,28004,10.5,,S
1210,0,3,"Jonsson, Mr. Nils Hilding",male,27,0,0,350408,7.8542,,S
1211,0,2,"Jefferys, Mr. Ernest Wilfred",male,22,2,0,C.A. 31029,31.5,,S
1212,0,3,"Andersson, Mr. Johan Samuel",male,26,0,0,347075,7.775,,S
1213,1,3,"Krekorian, Mr. Neshan",male,25,0,0,2654,7.2292,F E57,C
1214,0,2,"Nesson, Mr. Israel",male,26,0,0,244368,13,F2,S
1215,0,1,"Rowe, Mr. Alfred G",male,33,0,0,113790,26.55,,S
1216,1,1,"Kreuchen, Miss. Emilie",female,39,0,0,24160,211.3375,,S
1217,0,3,"Assam, Mr. Ali",male,23,0,0,SOTON/O.Q. 3101309,7.05,,S
1218,1,2,"Becker, Miss. Ruth Elizabeth",female,12,2,1,230136,39,F4,S
1219,0,1,"Rosenshine, Mr. George (Mr George Thorne"")""",male,46,0,0,PC 17585,79.2,,C
1220,0,2,"Clarke, Mr. Charles Valentine",male,29,1,0,2003,26,,S
1221,0,2,"Enander, Mr. Ingvar",male,21,0,0,236854,13,,S
1222,1,2,"Davies, Mrs. John Morgan (Elizabeth Agnes Mary White) ",female,48,0,2,C.A. 33112,36.75,,S
1223,0,1,"Dulles, Mr. William Crothers",male,39,0,0,PC 17580,29.7,A18,C
1224,0,3,"Thomas, Mr. Tannous",male,,0,0,2684,7.225,,C
1225,1,3,"Nakid, Mrs. Said (Waika Mary"" Mowad)""",female,19,1,1,2653,15.7417,,C
1226,0,3,"Cor, Mr. Ivan",male,27,0,0,349229,7.8958,,S
1227,0,1,"Maguire, Mr. John Edward",male,30,0,0,110469,26,C106,S
1228,0,2,"de Brito, Mr. Jose Joaquim",male,32,0,0,244360,13,,S
1229,0,3,"Elias, Mr. Joseph",male,39,0,2,2675,7.2292,,C
1230,0,2,"Denbury, Mr. Herbert",male,25,0,0,C.A. 31029,31.5,,S
1231,0,3,"Betros, Master. Seman",male,,0,0,2622,7.2292,,C
1232,0,2,"Fillbrook, Mr. Joseph Charles",male,18,0,0,C.A. 15185,10.5,,S
1233,1,3,"Lundstrom, Mr. Thure Edvin",male,32,0,0,350403,7.5792,,S
1234,0,3,"Sage, Mr. John George",male,,1,9,CA. 2343,69.55,,S
1235,1,1,"Cardeza, Mrs. James Warburton Martinez (Charlotte Wardle Drake)",female,58,0,1,PC 17755,512.3292,B51 B53 B55,C
1236,0,3,"van Billiard, Master. James William",male,,1,1,A/5. 851,14.5,,S
1237,1,3,"Abelseth, Miss. Karen Marie",female,16,0,0,348125,7.65,,S
1238,0,2,"Botsford, Mr. William Hull",male,26,0,0,237670,13,,S
1239,1,3,"Whabee, Mrs. George Joseph (Shawneene Abi-Saab)",female,38,0,0,2688,7.2292,,C
1240,0,2,"Giles, Mr. Ralph",male,24,0,0,248726,13.5,,S
1241,1,2,"Walcroft, Miss. Nellie",female,31,0,0,F.C.C. 13528,21,,S
1242,1,1,"Greenfield, Mrs. Leo David (Blanche Strouse)",female,45,0,1,PC 17759,63.3583,D10 D12,C
1243,0,2,"Stokes, Mr. Philip Joseph",male,25,0,0,F.C.C. 13540,10.5,,S
1244,0,2,"Dibden, Mr. William",male,18,0,0,S.O.C. 14879,73.5,,S
1245,0,2,"Herman, Mr. Samuel",male,49,1,2,220845,65,,S
1246,1,3,"Dean, Miss. Elizabeth Gladys Millvina""""",female,0.17,1,2,C.A. 2315,20.575,,S
1247,0,1,"Julian, Mr. Henry Forbes",male,50,0,0,113044,26,E60,S
1248,1,1,"Brown, Mrs. John Murray (Caroline Lane Lamson)",female,59,2,0,11769,51.4792,C101,S
1249,0,3,"Lockyer, Mr. Edward",male,,0,0,1222,7.8792,,S
1250,1,3,"O'Keefe, Mr. Patrick",male,,0,0,368402,7.75,,Q
1251,0,3,"Lindell, Mrs. Edvard Bengtsson (Elin Gerda Persson)",female,30,1,0,349910,15.55,,S
1252,0,3,"Sage, Master. William Henry",male,14.5,8,2,CA. 2343,69.55,,S
1253,1,2,"Mallet, Mrs. Albert (Antoinette Magnin)",female,24,1,1,S.C./PARIS 2079,37.0042,,C
1254,1,2,"Ware, Mrs. John James (Florence Louise Long)",female,31,0,0,CA 31352,21,,S
1255,0,3,"Strilic, Mr. Ivan",male,27,0,0,315083,8.6625,,S
1256,1,1,"Harder, Mrs. George Achilles (Dorothy Annan)",female,25,1,0,11765,55.4417,E50,C
1257,0,3,"Sage, Mrs. John (Annie Bullen)",female,,1,9,CA. 2343,69.55,,S
1258,0,3,"Caram, Mr. Joseph",male,,1,0,2689,14.4583,,C
1259,0,3,"Riihivouri, Miss. Susanna Juhantytar Sanni""""",female,22,0,0,3101295,39.6875,,S
1260,1,1,"Gibson, Mrs. Leonard (Pauline C Boeson)",female,45,0,1,112378,59.4,,C
1261,1,2,"Pallas y Castello, Mr. Emilio",male,29,0,0,SC/PARIS 2147,13.8583,,C
1262,0,2,"Giles, Mr. Edgar",male,21,1,0,28133,11.5,,S
1263,1,1,"Wilson, Miss. Helen Alice",female,31,0,0,16966,134.5,E39 E41,C
1264,1,1,"Ismay, Mr. Joseph Bruce",male,49,0,0,112058,0,B52 B54 B56,S
1265,0,2,"Harbeck, Mr. William H",male,44,0,0,248746,13,,S
1266,1,1,"Dodge, Mrs. Washington (Ruth Vidaver)",female,54,1,1,33638,81.8583,A34,S
1267,1,1,"Bowen, Miss. Grace Scott",female,45,0,0,PC 17608,262.375,,C
1268,0,3,"Kink, Miss. Maria",female,22,2,0,315152,8.6625,,S
1269,0,2,"Cotterill, Mr. Henry Harry""""",male,21,0,0,29107,11.5,,S
1270,0,1,"Hipkins, Mr. William Edward",male,55,0,0,680,50,C39,S
1271,0,3,"Asplund, Master. Carl Edgar",male,5,4,2,347077,31.3875,,S
1272,0,3,"O'Connor, Mr. Patrick",male,,0,0,366713,7.75,,Q
1273,0,3,"Foley, Mr. Joseph",male,26,0,0,330910,7.8792,,Q
1274,0,3,"Risien, Mrs. Samuel (Emma)",female,,0,0,364498,14.5,,S
1275,0,3,"McNamee, Mrs. Neal (Eileen O'Leary)",female,19,1,0,376566,16.1,,S
1276,0,2,"Wheeler, Mr. Edwin Frederick""""",male,,0,0,SC/PARIS 2159,12.875,,S
1277,1,2,"Herman, Miss. Kate",female,24,1,2,220845,65,,S
1278,0,3,"Aronsson, Mr. Ernst Axel Algot",male,24,0,0,349911,7.775,,S
1279,0,2,"Ashby, Mr. John",male,57,0,0,244346,13,,S
1280,0,3,"Canavan, Mr. Patrick",male,21,0,0,364858,7.75,,Q
1281,0,3,"Palsson, Master. Paul Folke",male,6,3,1,349909,21.075,,S
1282,0,1,"Payne, Mr. Vivian Ponsonby",male,23,0,0,12749,93.5,B24,S
1283,1,1,"Lines, Mrs. Ernest H (Elizabeth Lindsey James)",female,51,0,1,PC 17592,39.4,D28,S
1284,0,3,"Abbott, Master. Eugene Joseph",male,13,0,2,C.A. 2673,20.25,,S
1285,0,2,"Gilbert, Mr. William",male,47,0,0,C.A. 30769,10.5,,S
1286,1,3,"Kink-Heilmann, Mr. Anton",male,29,3,1,315153,22.025,,S
1287,1,1,"Smith, Mrs. Lucien Philip (Mary Eloise Hughes)",female,18,1,0,13695,60,C31,S
1288,0,3,"Colbert, Mr. Patrick",male,24,0,0,371109,7.25,,Q
1289,1,1,"Frolicher-Stehli, Mrs. Maxmillian (Margaretha Emerentia Stehli)",female,48,1,1,13567,79.2,B41,C
1290,0,3,"Larsson-Rondberg, Mr. Edvard A",male,22,0,0,347065,7.775,,S
1291,0,3,"Conlon, Mr. Thomas Henry",male,31,0,0,21332,7.7333,,Q
1292,1,1,"Bonnell, Miss. Caroline",female,30,0,0,36928,164.8667,C7,S
1293,0,2,"Gale, Mr. Harry",male,38,1,0,28664,21,,S
1294,1,1,"Gibson, Miss. Dorothy Winifred",female,22,0,1,112378,59.4,,C
1295,0,1,"Carrau, Mr. Jose Pedro",male,17,0,0,113059,47.1,,S
1296,1,1,"Frauenthal, Mr. Isaac Gerald",male,43,1,0,17765,27.7208,D40,C
1297,1,2,"Nourney, Mr. Alfred (Baron von Drachstedt"")""",male,20,0,0,SC/PARIS 2166,13.8625,D38,C
1298,0,2,"Ware, Mr. William Jeffery",male,23,1,0,28666,10.5,,S
1299,0,1,"Widener, Mr. George Dunton",male,50,1,1,113503,211.5,C80,C
1300,1,3,"Riordan, Miss. Johanna Hannah""""",female,,0,0,334915,7.7208,,Q
1301,0,3,"Peacock, Miss. Treasteall",female,3,1,1,SOTON/O.Q. 3101315,13.775,,S
1302,0,3,"Naughton, Miss. Hannah",female,,0,0,365237,7.75,,Q
1303,1,1,"Minahan, Mrs. William Edward (Lillian E Thorpe)",female,37,1,0,19928,90,C78,Q
1304,0,3,"Henriksson, Miss. Jenny Lovisa",female,28,0,0,347086,7.775,,S
1305,0,3,"Spector, Mr. Woolf",male,,0,0,A.5. 3236,8.05,,S
1306,1,1,"Oliva y Ocana, Dona. Fermina",female,39,0,0,PC 17758,108.9,C105,C
1307,0,3,"Saether, Mr. Simon Sivertsen",male,38.5,0,0,SOTON/O.Q. 3101262,7.25,,S
1308,0,3,"Ware, Mr. Frederick",male,,0,0,359309,8.05,,S
1309,1,3,"Peter, Master. Michael J",male,,1,1,2668,22.3583,,C

```

--------------------------------------------------------------------------------
/supervised/model_framework.py:
--------------------------------------------------------------------------------

```python
import copy
import gc
import json
import logging
import os
import time
import uuid

import numpy as np
import pandas as pd

from supervised.algorithms.factory import AlgorithmFactory
from supervised.algorithms.registry import (
    BINARY_CLASSIFICATION,
    MULTICLASS_CLASSIFICATION,
    REGRESSION,
    AlgorithmsRegistry,
)
from supervised.callbacks.callback_list import CallbackList
from supervised.exceptions import AutoMLException
from supervised.preprocessing.preprocessing import Preprocessing
from supervised.utils.additional_metrics import AdditionalMetrics
from supervised.utils.config import LOG_LEVEL
from supervised.utils.jsonencoder import MLJSONEncoder
from supervised.utils.metric import Metric
from supervised.validation.validation_step import ValidationStep

logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)

import joblib

from supervised.tuner.optuna.tuner import OptunaTuner
from supervised.utils.learning_curves import LearningCurves


class ModelFramework:
    def __init__(self, params, callbacks=None):  # None instead of a mutable default argument
        logger.debug("ModelFramework.__init__")
        self.uid = str(uuid.uuid4())

        for i in ["learner", "validation_strategy"]:  # mandatory parameters
            if i not in params:
                msg = "Missing {0} parameter in ModelFramework params".format(i)
                logger.error(msg)
                raise ValueError(msg)

        self.params = params
        self.callbacks = CallbackList(callbacks or [])

        self._name = params.get("name", "model")
        self.additional_params = params.get("additional")
        self.preprocessing_params = params.get("preprocessing")
        self.validation_params = params.get("validation_strategy")
        self.learner_params = params.get("learner")

        self._ml_task = params.get("ml_task")
        self._explain_level = params.get("explain_level")
        self._is_stacked = params.get("is_stacked", False)

        self.validation = None
        self.preprocessings = []
        self.learners = []

        self.train_time = None
        self.final_loss = None
        self.metric_name = None
        self.oof_predictions = None
        self._additional_metrics = None
        self._threshold = None  # used only for binary classifiers
        self._max_time_for_learner = params.get("max_time_for_learner", 3600)
        self._oof_predictions_fname = None
        self._single_prediction_time = None  # prediction time on single sample
        self._optuna_time_budget = params.get("optuna_time_budget")
        self._optuna_init_params = params.get("optuna_init_params", {})
        self._optuna_verbose = params.get("optuna_verbose", True)

        self._fairness_metric = params.get("fairness_metric")
        self._fairness_threshold = params.get("fairness_threshold")
        self._privileged_groups = params.get("privileged_groups", [])
        self._underprivileged_groups = params.get("underprivileged_groups", [])
        self._fairness_optimization = params.get("fairness_optimization")
        self._is_fair = None

        # the automl random state from AutoML constructor, used in Optuna optimizer
        self._automl_random_state = params.get("automl_random_state", 42)

    def get_train_time(self):
        return self.train_time

    def predictions(
        self,
        learner,
        preproces,
        X_train,
        y_train,
        sample_weight,
        sensitive_features,
        X_validation,
        y_validation,
        sample_weight_validation,
        sensitive_features_validation,
    ):
        y_train_true = y_train
        y_train_predicted = learner.predict(X_train)
        y_validation_true = y_validation
        y_validation_predicted = learner.predict(X_validation)

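        # bring true targets and predictions back to the original target scale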
        y_train_true = preproces.inverse_scale_target(y_train_true)
        y_train_predicted = preproces.inverse_scale_target(y_train_predicted)
        y_validation_true = preproces.inverse_scale_target(y_validation_true)
        y_validation_predicted = preproces.inverse_scale_target(y_validation_predicted)

        y_validation_columns = []
        if self._ml_task == MULTICLASS_CLASSIFICATION:
            # y_train_true = preproces.inverse_categorical_target(y_train_true)
            # y_validation_true = preproces.inverse_categorical_target(y_validation_true)
            # get columns, omit the last one (it is label)
            y_validation_columns = preproces.prepare_target_labels(
                y_validation_predicted
            ).columns.tolist()[:-1]
        elif self._ml_task == BINARY_CLASSIFICATION:
            class_names = self.preprocessings[-1].get_target_class_names()
            y_validation_columns = "prediction"
            if not ("0" in class_names and "1" in class_names):
                y_validation_columns = (
                    f"prediction_0_for_{class_names[0]}_1_for_{class_names[1]}"
                )
        else:
            y_validation_columns = "prediction"

        return {
            "y_train_true": y_train_true,
            "y_train_predicted": y_train_predicted,
            "sample_weight": sample_weight,
            "sensitive_features": sensitive_features,
            "y_validation_true": y_validation_true,
            "y_validation_predicted": y_validation_predicted,
            "sample_weight_validation": sample_weight_validation,
            "sensitive_features_validation": sensitive_features_validation,
            "validation_index": X_validation.index,
            "validation_columns": y_validation_columns,
        }

    def train(self, results_path, model_subpath):
        logger.debug(f"ModelFramework.train {self.learner_params.get('model_type')}")

        start_time = time.time()
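        # seed numpy so the learner training is reproducible for a given seed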
        np.random.seed(self.learner_params["seed"])

        optuna_tuner = None
        if self._optuna_time_budget is not None and OptunaTuner.is_optimizable(
            self.learner_params.get("model_type", "")
        ):
            optuna_tuner = OptunaTuner(
                results_path,
                ml_task=self._ml_task,
                eval_metric=self.get_metric(),
                time_budget=self._optuna_time_budget,
                init_params=self._optuna_init_params,
                verbose=self._optuna_verbose,
                n_jobs=self.learner_params.get("n_jobs", -1),
                random_state=self._automl_random_state,
            )

        self.validation = ValidationStep(self.validation_params)

        repeats = self.validation.get_repeats()
        for repeat in range(repeats):
            for k_fold in range(self.validation.get_n_splits()):
                train_data, validation_data = self.validation.get_split(k_fold, repeat)
                logger.debug(
                    "Data split, train X:{} y:{}, validation X:{}, y:{}".format(
                        train_data["X"].shape,
                        train_data["y"].shape,
                        validation_data["X"].shape,
                        validation_data["y"].shape,
                    )
                )
                if "sample_weight" in train_data:
                    logger.debug("Sample weight available during the training.")

                # the preprocessing is done at every validation step
                self.preprocessings += [
                    Preprocessing(
                        self.preprocessing_params, self.get_name(), k_fold, repeat
                    )
                ]

                X_train, y_train, sample_weight = self.preprocessings[
                    -1
                ].fit_and_transform(
                    train_data["X"], train_data["y"], train_data.get("sample_weight")
                )
                (
                    X_validation,
                    y_validation,
                    sample_weight_validation,
                ) = self.preprocessings[-1].transform(
                    validation_data["X"],
                    validation_data["y"],
                    validation_data.get("sample_weight"),
                )

                # skip preprocessing for sensitive features
                # TODO: need to add sensitive features in preprocessing because some rows might be skipped (missing target)
                # then we need to skip some rows in sensitive features as well
                # TODO: drop rows if there is missing data in sensitive feature?

                # get sensitive features from data split
                sensitive_features = train_data.get("sensitive_features")
                sensitive_features_validation = validation_data.get(
                    "sensitive_features"
                )

                if optuna_tuner is not None:
                    optuna_start_time = time.time()
                    self.learner_params = optuna_tuner.optimize(
                        self.learner_params.get("model_type", ""),
                        self.params.get("data_type", ""),
                        X_train,
                        y_train,
                        sample_weight,
                        X_validation,
                        y_validation,
                        sample_weight_validation,
                        self.learner_params,
                    )
                    # exclude optuna optimize time from model training
                    start_time += time.time() - optuna_start_time

                self.learner_params["explain_level"] = self._explain_level
                self.learners += [
                    AlgorithmFactory.get_algorithm(copy.deepcopy(self.learner_params))
                ]
                learner = self.learners[-1]
                learner.set_learner_name(k_fold, repeat, repeats)

                self.callbacks.add_and_set_learner(learner)
                self.callbacks.on_learner_train_start()

                log_to_file = os.path.join(
                    results_path, model_subpath, f"{learner.name}_training.log"
                )

                for i in range(learner.max_iters):
                    self.callbacks.on_iteration_start()

                    learner.fit(
                        X_train,
                        y_train,
                        sample_weight,
                        X_validation,
                        y_validation,
                        sample_weight_validation,
                        log_to_file,
                        self._max_time_for_learner,
                    )

                    if self.params.get("injected_sample_weight", False):
                        # print("Dont use sample weight in model evaluation")
                        sample_weight = None
                        sample_weight_validation = None

                    self.callbacks.on_iteration_end(
                        {"iter_cnt": i},
                        self.predictions(
                            learner,
                            self.preprocessings[-1],
                            X_train,
                            y_train,
                            sample_weight,
                            sensitive_features,
                            X_validation,
                            y_validation,
                            sample_weight_validation,
                            sensitive_features_validation,
                        ),
                    )

                    if learner.stop_training:
                        break
                    learner.update({"step": i})

                # end of learner iters loop
                self.callbacks.on_learner_train_end()

                model_path = os.path.join(results_path, model_subpath)
                learner.interpret(
                    X_train,
                    y_train,
                    X_validation,
                    y_validation,
                    model_file_path=model_path,
                    learner_name=learner.name,
                    class_names=self.preprocessings[-1].get_target_class_names(),
                    metric_name=self.get_metric_name(),
                    ml_task=self._ml_task,
                    explain_level=self._explain_level,
                )

                # save learner and free the memory
                p = os.path.join(model_path, learner.get_fname())
                learner.save(p)
                del learner.model
                learner.model = None
                # end of learner training

                # clear data
                del X_train
                del y_train
                del X_validation
                del y_validation

                if sample_weight is not None:
                    del sample_weight
                    del train_data["sample_weight"]
                if sample_weight_validation is not None:
                    del sample_weight_validation
                    del validation_data["sample_weight"]

                del train_data["X"]
                del train_data["y"]
                del validation_data["X"]
                del validation_data["y"]
                del train_data
                del validation_data

                gc.collect()

        # end of validation loop
        self.callbacks.on_framework_train_end()
        self._additional_metrics = self.get_additional_metrics()

        self.train_time = time.time() - start_time
        logger.debug("ModelFramework end of training")

    def release_learners(self):
        for learner in self.learners:
            if learner.model is not None:
                del learner.model
                learner.model = None

    def get_metric_name(self):
        if self.metric_name is not None:
            return self.metric_name
        early_stopping = self.callbacks.get("early_stopping")
        if early_stopping is None:
            return None
        self.metric_name = early_stopping.metric.name
        return early_stopping.metric.name

    def get_metric(self):
        early_stopping = self.callbacks.get("early_stopping")
        if early_stopping:
            return early_stopping.metric
        return Metric({"name": self.get_metric_name()})

    def get_out_of_folds(self):
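        # out-of-folds predictions are resolved in three steps: the in-memory
        # cache, then the CSV file saved with the model, and finally the best
        # predictions kept by the early_stopping callback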
        if self.oof_predictions is not None:
            return self.oof_predictions.copy(deep=True)

        if self._oof_predictions_fname is not None:
            self.oof_predictions = pd.read_csv(self._oof_predictions_fname)
            return self.oof_predictions.copy(deep=True)

        early_stopping = self.callbacks.get("early_stopping")
        if early_stopping is None:
            return None
        self.oof_predictions = early_stopping.best_y_oof

        ###############################################################
        # in case of a one-hot encoded multiclass target
        target_cols = [
            c for c in self.oof_predictions.columns.tolist() if "target" in c
        ]
        if len(target_cols) > 1:
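            # several target_* columns mean a one-hot encoded target; collapse
            # them into a single integer-coded column where the class index is
            # the position of the column holding the 1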
            target = self.oof_predictions[target_cols[0]].copy()
            target.name = "target"
            for i, t in enumerate(target_cols):
                target[self.oof_predictions[t] == 1] = i
            self.oof_predictions.drop(target_cols, axis=1, inplace=True)

            self.oof_predictions.insert(0, "target", np.array(target))

        return self.oof_predictions

    def get_final_loss(self):
        if self.final_loss is not None:
            return self.final_loss
        early_stopping = self.callbacks.get("early_stopping")
        if early_stopping is None:
            return None
        self.final_loss = early_stopping.final_loss
        return early_stopping.final_loss

    """
    def get_metric_logs(self):
        metric_logger = self.callbacks.get("metric_logger")
        if metric_logger is None:
            return None
        return metric_logger.loss_values
    """

    def get_type(self):
        return self.learner_params.get("model_type")

    def get_name(self):
        return self._name

    def involved_model_names(self):
        """Returns the list of all models involved in the current model.
        For a single model, it returns a list with just the model's name.
        For an ensemble, it returns the ensemble's name together with the names
        of all internal models used to build it.
        For a single model trained on stacked data, it returns only the model's
        name (names of the models used in stacking are not included)."""
        return [self._name]

    def is_valid(self):
        """is_valid is used by Ensemble to check whether it contains more than one model.
        An Ensemble with only a single model should not be selected as the best model.
        """
        return True

    def is_fast_enough(self, max_single_prediction_time):
        # no need to check
        if max_single_prediction_time is None:
            return True

        # no information about the prediction time
        if self._single_prediction_time is None:
            return True

        return self._single_prediction_time < max_single_prediction_time

    def predict(self, X):
        logger.debug("ModelFramework.predict")

        if self.learners is None or len(self.learners) == 0:
            raise Exception("Learners are not initialized")
        # run predict on all learners and return the average
        y_predicted = None
        for ind, learner in enumerate(self.learners):
            # preprocessing goes here
            X_data, _, _ = self.preprocessings[ind].transform(X.copy(), None)
            y_p = learner.predict(X_data)
            y_p = self.preprocessings[ind].inverse_scale_target(y_p)

            y_predicted = y_p if y_predicted is None else y_predicted + y_p

        y_predicted_average = y_predicted / float(len(self.learners))

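        # map the averaged raw predictions back to the final labels/probabilities;
        # the first fold's preprocessing handles the target decoding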
        y_predicted_final = self.preprocessings[0].prepare_target_labels(
            y_predicted_average
        )

        return y_predicted_final

    def get_additional_metrics(self):
        if self._additional_metrics is None:
            logger.debug("Compute additional metrics")
            # 'target' - the target after processing used for model training
            # 'prediction' - out of folds predictions of the model
            oof_predictions = self.get_out_of_folds()
            prediction_cols = [c for c in oof_predictions.columns if "prediction" in c]
            target_cols = [c for c in oof_predictions.columns if "target" in c]

            target = oof_predictions[target_cols]

            oof_preds = None
            if self._ml_task == MULTICLASS_CLASSIFICATION:
                oof_preds = self.preprocessings[0].prepare_target_labels(
                    oof_predictions[prediction_cols].values
                )
            else:
                oof_preds = oof_predictions[prediction_cols]

            sample_weight = None
            if "sample_weight" in oof_predictions.columns:
                sample_weight = oof_predictions["sample_weight"]

            sensitive_features = None
            sensitive_cols = [c for c in oof_predictions.columns if "sensitive" in c]
            if sensitive_cols:
                sensitive_features = oof_predictions[sensitive_cols]

            self._additional_metrics = AdditionalMetrics.compute(
                target,
                oof_preds,
                sample_weight,
                self._ml_task,
                sensitive_features,
                self._fairness_metric
                if self._ml_task != REGRESSION
                else f"{self._fairness_metric}@{self.get_metric_name()}",
                self._fairness_threshold,
                self._privileged_groups,
                self._underprivileged_groups,
                self._fairness_optimization,
            )
            if self._ml_task == BINARY_CLASSIFICATION:
                self._threshold = float(self._additional_metrics["threshold"])
        return self._additional_metrics

    def get_sensitive_features_names(self):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return [i for i in list(fm.keys()) if i != "fairness_optimization"]

    def get_fairness_metric(self, col_name):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return fm.get(col_name, {}).get("fairness_metric_value")

    def get_fairness_optimization(self):
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        return fm.get("fairness_optimization", {})

    def get_worst_fairness(self):
        # We have fairness metrics per sensitive feature.
        # The worst fairness metric is:
        # - for ratio metrics, the lowest fairness value from all sensitive features
        # - for difference metrics, the highest fairness value from all sensitive features
        # It is used as the bias-mitigation stopping criterion.
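        # e.g., for "demographic_parity_ratio" the worst value is the smallest
        # ratio across sensitive features; for "demographic_parity_difference"
        # it is the largest difference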

        metrics = self.get_additional_metrics()

        fm = metrics.get("fairness_metrics", {})
        worst_value = None
        for col_name, values in fm.items():
            if col_name == "fairness_optimization":
                continue
            if "ratio" in self._fairness_metric.lower():
                if worst_value is None:
                    worst_value = values.get("fairness_metric_value", 0)
                else:
                    worst_value = min(
                        worst_value, values.get("fairness_metric_value", 0)
                    )
            else:
                if worst_value is None:
                    worst_value = values.get("fairness_metric_value", 1)
                else:
                    worst_value = max(
                        worst_value, values.get("fairness_metric_value", 1)
                    )

        return worst_value

    def get_best_fairness(self):
        # We have fairness metrics per sensitive feature.
        # The best fairness metric is:
        # - for ratio metrics, the highest fairness value from all sensitive features
        # - for difference metrics, the lowest fairness value from all sensitive features
        # It is used as the bias-mitigation stopping criterion.

        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        best_value = None
        for col_name, values in fm.items():
            if col_name == "fairness_optimization":
                continue
            if "ratio" in self._fairness_metric.lower():
                if best_value is None:
                    best_value = values.get("fairness_metric_value", 0)
                else:
                    best_value = max(best_value, values.get("fairness_metric_value", 0))
            else:
                if best_value is None:
                    best_value = values.get("fairness_metric_value", 1)
                else:
                    best_value = min(best_value, values.get("fairness_metric_value", 1))

        return best_value

    def is_fair(self):
        if self._is_fair is not None:
            return self._is_fair
        metrics = self.get_additional_metrics()
        fm = metrics.get("fairness_metrics", {})
        for col, m in fm.items():
            if col == "fairness_optimization":
                continue
            if not m.get("is_fair", True):
                self._is_fair = False
                return False
        self._is_fair = True
        return True

    def save(self, results_path, model_subpath):
        start_time = time.time()
        model_path = os.path.join(results_path, model_subpath)
        logger.info(f"Save the model {model_path}")

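        # with k-fold cross-validation the stored predictions are out-of-folds;
        # otherwise they come from the single validation split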
        type_of_predictions = (
            "validation" if "k_folds" not in self.validation_params else "out_of_folds"
        )
        predictions_fname = os.path.join(
            model_subpath, f"predictions_{type_of_predictions}.csv"
        )
        self._oof_predictions_fname = os.path.join(results_path, predictions_fname)
        predictions = self.get_out_of_folds()
        predictions.to_csv(self._oof_predictions_fname, index=False)

        saved = [os.path.join(model_subpath, l.get_fname()) for l in self.learners]

        with open(os.path.join(model_path, "framework.json"), "w") as fout:
            preprocessing = [p.to_json() for p in self.preprocessings]
            learners_params = [learner.get_params() for learner in self.learners]

            desc = {
                "uid": self.uid,
                "name": self._name,
                "preprocessing": preprocessing,
                "learners": learners_params,
                "params": self.params,
                "saved": saved,
                "predictions_fname": predictions_fname,
                "metric_name": self.get_metric_name(),
                "final_loss": self.get_final_loss(),
                "train_time": self.get_train_time(),
                "is_stacked": self._is_stacked,
                "joblib_version": joblib.__version__,
            }
            desc["final_loss"] = str(desc["final_loss"])
            if self._threshold is not None:
                desc["threshold"] = self._threshold
            if self._single_prediction_time is not None:
                desc["single_prediction_time"] = self._single_prediction_time
            fout.write(json.dumps(desc, indent=4, cls=MLJSONEncoder))

        learning_curve_metric = self.learners[0].get_metric_name()
        if learning_curve_metric is None:
            learning_curve_metric = self.get_metric_name()

        LearningCurves.plot(
            [l.name for l in self.learners],
            learning_curve_metric,
            model_path,
            trees_in_iteration=self.additional_params.get("trees_in_step"),
        )

        # compute the additional metrics to make sure they are available
        self._additional_metrics = self.get_additional_metrics()

        AdditionalMetrics.save(
            self._additional_metrics, self._ml_task, self.model_markdown(), model_path
        )

        with open(os.path.join(model_path, "status.txt"), "w") as fout:
            fout.write("ALL OK!")
        # add the save time to the total train time
        # (saving always follows training)
        self.train_time += time.time() - start_time

    def model_markdown(self):
        long_name = AlgorithmsRegistry.get_long_name(
            self._ml_task, self.learner_params["model_type"]
        )
        short_name = self.learner_params["model_type"]
        desc = f"# Summary of {self.get_name()}\n\n"

        desc += "[<< Go back](../README.md)\n\n"

        if long_name == short_name:
            desc += f"\n## {short_name}\n"
        else:
            desc += f"\n## {long_name} ({short_name})\n"
        for k, v in self.learner_params.items():
            if k in ["model_type", "ml_task", "seed"]:
                continue
            desc += f"- **{k}**: {v}\n"
        desc += "\n## Validation\n"
        for k, v in self.validation_params.items():
            if "path" not in k:
                desc += f" - **{k}**: {v}\n"
        desc += "\n## Optimized metric\n"
        desc += f"{self.get_metric_name()}\n"
        desc += "\n## Training time\n"
        desc += f"\n{np.round(self.train_time,1)} seconds\n"
        return desc

    @staticmethod
    def load(results_path, model_subpath, lazy_load=True):
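        # with lazy_load=True the learners' model binaries are not read from
        # disk until they are needed (see AlgorithmFactory.load)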
        model_path = os.path.join(results_path, model_subpath)
        logger.info(f"Loading model framework from {model_path}")

        with open(os.path.join(model_path, "framework.json")) as file:
            json_desc = json.load(file)

        joblib_version_computer = joblib.__version__
        joblib_version_framework = json_desc.get("joblib_version")

        if (
            joblib_version_framework is not None
            and joblib_version_computer != joblib_version_framework
        ):
            raise AutoMLException(
                f"Joblib version mismatch. Computer: {joblib_version_computer}, Framework: {joblib_version_framework}. Please switch to the framework's joblib version!"
            )

        mf = ModelFramework(json_desc["params"])
        mf.uid = json_desc.get("uid", mf.uid)
        mf._name = json_desc.get("name", mf._name)
        mf._threshold = json_desc.get("threshold")
        mf.train_time = json_desc.get("train_time", mf.train_time)
        mf.final_loss = json_desc.get("final_loss", mf.final_loss)
        mf.metric_name = json_desc.get("metric_name", mf.metric_name)
        mf._is_stacked = json_desc.get("is_stacked", mf._is_stacked)
        mf._single_prediction_time = json_desc.get(
            "single_prediction_time", mf._single_prediction_time
        )
        predictions_fname = json_desc.get("predictions_fname")
        if predictions_fname is not None:
            mf._oof_predictions_fname = os.path.join(results_path, predictions_fname)

        mf.learners = []
        for learner_desc, learner_subpath in zip(
            json_desc.get("learners"), json_desc.get("saved")
        ):
            learner_path = os.path.join(results_path, learner_subpath)
            l = AlgorithmFactory.load(learner_desc, learner_path, lazy_load)
            mf.learners += [l]

        mf.preprocessings = []
        for p in json_desc.get("preprocessing"):
            ps = Preprocessing()
            ps.from_json(p, results_path)
            mf.preprocessings += [ps]

        return mf

```
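
A minimal usage sketch (the result-directory names below are placeholders; `ModelFramework.load` expects the `results_path`/`model_subpath` layout produced by `save`):

```python
# load a previously saved ModelFramework and run predictions;
# "AutoML_1" and "1_Default_Xgboost" are hypothetical example paths
import pandas as pd

from supervised.model_framework import ModelFramework

mf = ModelFramework.load("AutoML_1", "1_Default_Xgboost")

X = pd.read_csv("new_data.csv")  # must contain the original feature columns
predictions = mf.predict(X)
```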