ag first commit

4302  notebooks/TRAIN.ipynb  Normal file
File diff suppressed because one or more lines are too long

320  notebooks/Variable_explanation.ipynb  Normal file
File diff suppressed because one or more lines are too long

97  src/main.py  Normal file
@@ -0,0 +1,97 @@
from utils import retrive_data, split
from model import train, gain_accuracy_train
from sklearn.metrics import confusion_matrix, matthews_corrcoef, accuracy_score
import xgboost as xgb
import pandas as pd
import pickle
import argparse

def main(args):

    labeled, labeled_small = retrive_data(reload_data=args.reload_data,
                                          threshold_under_represented=0.5,
                                          path='/home/agobbi/Projects/PID/datanalytics/PID/src')

    dataset, dataset_test = split(labeled_small if args.use_small else labeled,
                                  SKI_AREA_TEST='Klausberg',
                                  SEASON_TEST_SKIAREA='Kronplatz',
                                  SEASON_TEST_YEAR=2023,
                                  use_smote=args.use_smote,
                                  weight_type='sqrt')

    if args.retrain:
        print('OPTUNA hyperparameter tuning, please wait!')
        best_model, params_final = train(dataset, n_trials=args.n_trials, timeout=600, num_boost_round=600)
        feat_imp = pd.Series(best_model.get_fscore()).sort_values(ascending=False)

        with open('best_params.pkl', 'wb') as f:
            pickle.dump([params_final, feat_imp, best_model], f)

    else:
        with open('best_params.pkl', 'rb') as f:
            params_final, feat_imp, best_model = pickle.load(f)

    # to retrieve predictions the data must first be converted to xgboost's DMatrix type
    tmp_train = xgb.DMatrix(dataset.X_train[best_model.feature_names], dataset.y_train, enable_categorical=True)
    tmp_valid = xgb.DMatrix(dataset.X_valid[best_model.feature_names], dataset.y_valid, enable_categorical=True)

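    # note: under 'multi:softprob' Booster.predict returns an (n_samples, num_class)
    # probability matrix, hence the argmax(1) below to obtain hard class labels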
    preds_class_valid = best_model.predict(tmp_valid)
    preds_class_train = best_model.predict(tmp_train)
    print('################## RESULT ON THE TRAIN SET #####################')
    print(confusion_matrix(dataset.y_train, preds_class_train.argmax(1)))
    print(f'MCC: {matthews_corrcoef(dataset.y_train, preds_class_train.argmax(1))}')
    print(f'ACC: {accuracy_score(dataset.y_train, preds_class_train.argmax(1))}')
    print('################## RESULT ON THE VALIDATION SET #####################')
    print(confusion_matrix(dataset.y_valid, preds_class_valid.argmax(1)))
    print(f'MCC: {matthews_corrcoef(dataset.y_valid, preds_class_valid.argmax(1))}')
    print(f'ACC: {accuracy_score(dataset.y_valid, preds_class_valid.argmax(1))}')

    if args.retrain_last_model:
        tot, bst_FS, FS = gain_accuracy_train(dataset, feat_imp, num_boost_round=600, params=params_final)
        with open('best_params_and_final_model.pkl', 'wb') as f:
            pickle.dump([tot, bst_FS, FS], f)
    else:
        with open('best_params_and_final_model.pkl', 'rb') as f:
            tot, bst_FS, FS = pickle.load(f)

    dtest_FS = xgb.DMatrix(dataset_test.X_test_area[bst_FS.feature_names], dataset_test.y_test_area, enable_categorical=True)
    dtest_season_FS = xgb.DMatrix(dataset_test.X_test_season[bst_FS.feature_names], dataset_test.y_test_season, enable_categorical=True)
    preds_class_test = bst_FS.predict(dtest_FS)
    preds_class_test_season = bst_FS.predict(dtest_season_FS)

    mcc = matthews_corrcoef(dataset_test.y_test_area, preds_class_test.argmax(1))
    acc = accuracy_score(dataset_test.y_test_area, preds_class_test.argmax(1))
    cm = confusion_matrix(dataset_test.y_test_area, preds_class_test.argmax(1))

    print(f'RESULT ON THE TEST SKI AREA {mcc=}, {acc=}, \n{cm=}')

    mcc = matthews_corrcoef(dataset_test.y_test_season, preds_class_test_season.argmax(1))
    acc = accuracy_score(dataset_test.y_test_season, preds_class_test_season.argmax(1))
    cm = confusion_matrix(dataset_test.y_test_season, preds_class_test_season.argmax(1))

    print(f'RESULT ON THE TEST SKI SEASON {mcc=}, {acc=}, {cm=}')

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Train an Optuna-tuned XGBoost model')
    parser.add_argument('--use_small', action='store_true', help='Aggregate under-represented input classes (e.g. rare countries)')
    parser.add_argument('--use_smote', action='store_true', help='Oversample under-represented target labels')
    parser.add_argument('--retrain', action='store_true', help='Rerun the Optuna hyperparameter search')
    parser.add_argument('--reload_data', action='store_true', help='Download the data from the db')
    parser.add_argument('--retrain_last_model', action='store_true', help='Retrain the final feature-selected model')
    parser.add_argument('--n_trials', type=int, default=1000, help='Number of Optuna trials')

    args = parser.parse_args()
    main(args)

# example usage:
# python main.py --use_small --retrain --retrain_last_model --n_trials=10 --reload_data
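# a minimal sketch (not part of this commit) of how the saved final model could be
# reloaded elsewhere for inference on a new dataframe X_new (hypothetical name):
#
#   import pickle
#   import xgboost as xgb
#   with open('best_params_and_final_model.pkl', 'rb') as f:
#       tot, bst_FS, FS = pickle.load(f)
#   dnew = xgb.DMatrix(X_new[bst_FS.feature_names], enable_categorical=True)
#   pred_class = bst_FS.predict(dnew).argmax(1)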

99  src/model.py  Normal file
@@ -0,0 +1,99 @@
import xgboost as xgb
from sklearn.metrics import matthews_corrcoef, accuracy_score
import optuna
from utils import Dataset
import pandas as pd

def objective(trial, dataset: Dataset, num_boost_round: int):

    # search space over the hyperparameters usually tuned for XGBoost
    params = dict(
        learning_rate=trial.suggest_float("learning_rate", 0.01, 0.2),
        max_depth=trial.suggest_int("max_depth", 5, 15),
        min_child_weight=trial.suggest_int("min_child_weight", 1, 8),
        gamma=trial.suggest_float("gamma", 0, 10),
        subsample=trial.suggest_float("subsample", 0.01, 1),
        colsample_bytree=trial.suggest_float("colsample_bytree", 0.01, 1),
        alpha=trial.suggest_float("alpha", 0, 10),
        objective='multi:softprob',
        nthread=4,
        num_class=5,
        seed=27)
    # 'lambda' is a Python keyword, so it cannot be passed through the dict() call above
    params['lambda'] = trial.suggest_float("lambda", 0, 10)

    dtrain = xgb.DMatrix(dataset.X_train, dataset.y_train,
                         enable_categorical=True,
                         weight=dataset.weight_train)
    dvalid = xgb.DMatrix(dataset.X_valid, dataset.y_valid,
                         enable_categorical=True)

    bst = xgb.train(params, dtrain, verbose_eval=False, num_boost_round=num_boost_round,
                    evals=[(dtrain, "train"), (dvalid, "valid")],
                    early_stopping_rounds=100)

    preds = bst.predict(dvalid)
    ## MCC is more robust than plain accuracy on unbalanced classes
    mcc = matthews_corrcoef(dataset.y_valid, preds.argmax(1))

    return mcc


def train(dataset, n_trials=1000, timeout=600, num_boost_round=600):

    study = optuna.create_study(direction="maximize")
    study.optimize(lambda trial: objective(trial, dataset, num_boost_round), n_trials=n_trials, timeout=timeout)

    # retrain once with the best hyperparameters found by the study
    params_final = dict(
        objective='multi:softprob',
        nthread=4,
        num_class=5,  # the 5 'india' classes
        seed=27)
    params_final.update(study.best_params)
    dtrain = xgb.DMatrix(dataset.X_train, dataset.y_train,
                         enable_categorical=True,
                         weight=dataset.weight_train)
    dvalid = xgb.DMatrix(dataset.X_valid, dataset.y_valid,
                         enable_categorical=True)
    bst = xgb.train(params_final, dtrain, verbose_eval=False, num_boost_round=num_boost_round,
                    evals=[(dtrain, "train"), (dvalid, "valid")],
                    early_stopping_rounds=100)
    return bst, params_final
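
# example usage (sketch; 'dataset' is the Dataset returned by utils.split):
#   bst, params_final = train(dataset, n_trials=50, timeout=600, num_boost_round=600)
#   print(params_final, bst.best_iteration)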


def gain_accuracy_train(dataset: Dataset, feat_imp: pd.Series, num_boost_round: int, params: dict):

    tot = []
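    # greedy forward selection over feature importance: e.g. with the hypothetical
    # ranking feat_imp = {'slope': 120, 'age': 90, 'speed': 40} the loop trains on
    # ['slope'], then ['slope', 'age'], then ['slope', 'age', 'speed'] and records
    # the validation MCC/accuracy for each feature-set size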
    for i in range(1, dataset.X_train.shape[1]):

        dtrain_FS = xgb.DMatrix(dataset.X_train[list(feat_imp.head(i).index)], dataset.y_train,
                                enable_categorical=True,
                                weight=dataset.weight_train)
        dvalid_FS = xgb.DMatrix(dataset.X_valid[list(feat_imp.head(i).index)], dataset.y_valid, enable_categorical=True)

        bst_FS = xgb.train(params, dtrain_FS, verbose_eval=False, num_boost_round=num_boost_round,
                           evals=[(dtrain_FS, "train"), (dvalid_FS, "valid")],
                           early_stopping_rounds=100)
        preds_class_valid = bst_FS.predict(dvalid_FS)
        mcc = matthews_corrcoef(dataset.y_valid, preds_class_valid.argmax(1))
        acc = accuracy_score(dataset.y_valid, preds_class_valid.argmax(1))
        tot.append({'mcc': mcc, 'acc': acc, 'FS': i})

    tot = pd.DataFrame(tot)
    FS = int(tot.loc[tot.acc.argmax()].FS)  ## pick the feature count with the best validation accuracy
    print(f'Best model with {FS} features, retraining....')

    dtrain_FS = xgb.DMatrix(dataset.X_train[list(feat_imp.head(FS).index)], dataset.y_train, enable_categorical=True, weight=dataset.weight_train)
    dvalid_FS = xgb.DMatrix(dataset.X_valid[list(feat_imp.head(FS).index)], dataset.y_valid, enable_categorical=True)
    bst_FS = xgb.train(params, dtrain_FS, verbose_eval=False, num_boost_round=num_boost_round,
                       evals=[(dtrain_FS, "train"), (dvalid_FS, "valid")],
                       early_stopping_rounds=100)

    return tot, bst_FS, FS

149  src/utils.py  Normal file
@@ -0,0 +1,149 @@
import pandas as pd
import psycopg2 as pg
import numpy as np
from sklearn.model_selection import train_test_split
import pickle
from dataclasses import dataclass
from typing import Union
import os

## AUXILIARY CLASSES
@dataclass
class Dataset:
    X_train: Union[pd.DataFrame, None]
    y_train: Union[pd.Series, None]
    X_valid: Union[pd.DataFrame, None]
    y_valid: Union[pd.Series, None]
    weight_train: Union[np.ndarray, None]


@dataclass
class Dataset_test:
    X_test_area: Union[pd.DataFrame, None]
    y_test_area: Union[pd.Series, None]
    X_test_season: Union[pd.DataFrame, None]
    y_test_season: Union[pd.Series, None]

def retrive_data(reload_data: bool, threshold_under_represented: float, path: str):
    if reload_data:
        engine = pg.connect("dbname='safeidx' user='fbk_mpba' host='172.104.247.67' port='5432' password='fbk2024$'")
        df = pd.read_sql('select * from fbk_export_20240212', con=engine)
        with open(os.path.join(path, 'data.pkl'), 'wb') as f:
            pickle.dump(df, f)
    else:
        with open(os.path.join(path, 'data.pkl'), 'rb') as f:
            df = pickle.load(f)
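    # the pickle file acts as a local cache: the database is queried only when
    # reload_data is True, otherwise the last downloaded snapshot is reused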

    ## these columns can lead to overfitting!
    df.drop(columns=['dateandtime', 'skiarea_id', 'day_of_year', 'minute_of_day', 'year'], inplace=True)

    ## evacuation_vehicles holds a list per row and must be expanded into one boolean column per vehicle
    ev = set({})
    for i, row in df.iterrows():
        ev = ev.union(set(row.evacuation_vehicles))
    for c in ev:
        df[c] = False
    for i, row in df.iterrows():
        for c in row.evacuation_vehicles:
            df.loc[i, c] = True
    df.drop(columns=['town', 'province', 'evacuation_vehicles'], inplace=True)
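    # e.g. a row with evacuation_vehicles == ['sled', 'helicopter'] (hypothetical
    # values) gets sled=True and helicopter=True, and False in every other new column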

    # keep only the rows with a label and cast age to float (missing ages stay NaN)
    labeled = df[~pd.isna(df.india)].reset_index().drop(columns='index')
    labeled['age'] = labeled['age'].astype(np.float32)

    ## some categories are under-represented; a more stable model may be
    ## obtained by collapsing such rare classes
    to_remove = {}
    for c in labeled.columns:
        if c not in ['india', 'age', 'season', 'skiarea_name']:
            labeled[c] = labeled[c].astype('str')
            tmp = labeled.groupby(c)[c].count()
            tmp = 100 * tmp / tmp.max()  # counts as a percentage of the most frequent class
            tmp = tmp[tmp < threshold_under_represented]
            if len(tmp) > 0:
                for k in tmp.index:
                    if c not in to_remove.keys():
                        to_remove[c] = []
                    to_remove[c].append(k)

    ## keep both datasets: 'labeled' untouched, 'labeled_small' with the rare classes collapsed into 'other'
    labeled_small = labeled.copy()
    for c in to_remove.keys():
        for k in to_remove[c]:
            labeled_small.loc[labeled_small[c] == k, c] = 'other'
    for c in labeled_small.columns:
        if c not in ['age', 'season', 'skiarea_name']:
            labeled_small[c] = labeled_small[c].fillna('None').astype('category')
            labeled[c] = labeled[c].fillna('None').astype('category')
    labeled.dropna(inplace=True)
    labeled_small.dropna(inplace=True)

    # transform the india labels into integers
    labeled.india = labeled.india.apply(lambda x: x.replace('i', '')).astype(int)
    labeled_small.india = labeled_small.india.apply(lambda x: x.replace('i', '')).astype(int)

    return labeled, labeled_small


def split(labeled: pd.DataFrame,
          SKI_AREA_TEST: str = 'Klausberg',
          SEASON_TEST_SKIAREA: str = 'Kronplatz',
          SEASON_TEST_YEAR: int = 2023,
          use_smote: bool = False,
          weight_type: str = 'sqrt'):

    # carve out the two test sets
    test_area = labeled[labeled.skiarea_name == SKI_AREA_TEST]
    test_area_season = labeled[(labeled.skiarea_name == SEASON_TEST_SKIAREA) & (labeled.season >= SEASON_TEST_YEAR)]

    ## remove the corresponding test rows from the training data
    labeled_tmp = labeled[labeled.skiarea_name != SKI_AREA_TEST]
    labeled_tmp = labeled_tmp[(labeled_tmp.skiarea_name != SEASON_TEST_SKIAREA) | (labeled_tmp.season < SEASON_TEST_YEAR)]
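    # with the defaults, Klausberg is never seen in training (spatial generalisation)
    # and the 2023+ seasons of Kronplatz are held out (temporal generalisation)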

    X_train, X_valid, y_train, y_valid = train_test_split(labeled_tmp.drop(columns=['india', 'season', 'skiarea_name']),
                                                          labeled_tmp.india, test_size=0.33, random_state=0, stratify=labeled_tmp.india)

    if use_smote:
        from imblearn.over_sampling import RandomOverSampler

        # note: despite the flag name this is plain random oversampling, not SMOTE
        sm = RandomOverSampler()
        X_train, y_train = sm.fit_resample(X_train, y_train)

    ## compute per-class weights for the unbalanced dataset
    w = pd.DataFrame(np.unique(y_train, return_counts=True)).T
    w.columns = ['class', 'p']

    ## when computing the training error these per-class weights are applied:
    ## errors on the rarest (most severe) classes can be punished more
    if weight_type == 'sqrt':
        w.p = np.sqrt(w.p.sum()) / w.p
        print(w)
    elif weight_type == 'sum':
        w.p = w.p.sum() / w.p / w.shape[0]
        print(w)
    else:
        print(f'{weight_type=} not implemented, please use a valid one (sqrt or sum); all weights will be set to 1')
        w.p = 1
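    # worked example (hypothetical counts): classes [0, 1, 2] seen [100, 25, 4] times
    # give 'sqrt' weights sqrt(129)/[100, 25, 4] ~= [0.11, 0.45, 2.84], so an error on
    # the rarest class costs ~25x more than one on the most frequent class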

    if use_smote is False:
        weight_train = pd.merge(pd.DataFrame({'class': y_train}), w).p.values
    else:
        # the oversampled classes are already balanced, so use uniform weights
        w.p = 1
        weight_train = pd.merge(pd.DataFrame({'class': y_train}), w).p.values

    dataset = Dataset(X_train, y_train, X_valid, y_valid, weight_train)
    dataset_test = Dataset_test(test_area, test_area.india, test_area_season, test_area_season.india)

    return dataset, dataset_test