
Template Code and Tips for Mainstream Machine Learning Models [xgb, lgb, Keras, LR]

2018-04-22


XGBoost parameter-tuning guide: http://blog.csdn.net/han_xiaoyang/article/details/52665396
XGBoost official API: http://xgboost.readthedocs.io/en/latest//python/python_api.html

Preprocess

# A generic preprocessing skeleton
import pandas as pd
import numpy as np
import scipy as sp

# File reading
def read_csv_file(f, logging=False):
    print("========== Reading data ==========")
    data = pd.read_csv(f)
    if logging:
        print(data.head(5))
        print(f, "contains the following columns")
        print(data.columns.values)
        print(data.describe())
        print(data.info())
    return data

Logistic Regression

# A generic LogisticRegression skeleton
import pandas as pd
import numpy as np
from scipy import sparse
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

# 1. load data
df_train = pd.DataFrame()
df_test = pd.DataFrame()
y_train = df_train['label'].values

# 2. process data
ss = StandardScaler()

# 3. feature engineering/encoding
# 3.1 For categorical (labeled) features
# Fit the encoder on the training data only, then reuse it on the test data,
# so that train and test end up with the same columns.
enc = OneHotEncoder(handle_unknown='ignore')
feats = ["creativeID", "adID", "campaignID"]
for i, feat in enumerate(feats):
    x_train = enc.fit_transform(df_train[feat].values.reshape(-1, 1))
    x_test = enc.transform(df_test[feat].values.reshape(-1, 1))
    if i == 0:
        X_train, X_test = x_train, x_test
    else:
        X_train, X_test = sparse.hstack((X_train, x_train)), sparse.hstack((X_test, x_test))

# 3.2 For numerical features
# StandardScaler expects 2-D data; otherwise reshape(-1, len(feats)) is required.
feats = ["price", "age"]
x_train = ss.fit_transform(df_train[feats].values)
x_test = ss.transform(df_test[feats].values)
X_train, X_test = sparse.hstack((X_train, x_train)), sparse.hstack((X_test, x_test))

# model training
lr = LogisticRegression()
lr.fit(X_train, y_train)
proba_test = lr.predict_proba(X_test)[:, 1]

LightGBM

1. Binary classification

import lightgbm as lgb
import pandas as pd
import numpy as np
import pickle
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split

print("Loading Data ... ")
# Load the data (your own loading routine)
train_x, train_y, test_x = load_data()

# Split off a validation set with sklearn's train_test_split;
# test_size controls the validation share (5% here), adjust as needed.
X, val_X, y, val_y = train_test_split(
    train_x,
    train_y,
    test_size=0.05,
    random_state=1,
    stratify=train_y  # keep the label distribution of the split consistent with the original data
)
X_train = X
y_train = y
X_test = val_X
y_test = val_y

# create dataset for lightgbm
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

# specify your configurations as a dict
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': {'binary_logloss', 'auc'},
    'num_leaves': 5,
    'max_depth': 6,
    'min_data_in_leaf': 450,
    'learning_rate': 0.1,
    'feature_fraction': 0.9,
    'bagging_fraction': 0.95,
    'bagging_freq': 5,
    'lambda_l1': 1,
    'lambda_l2': 0.001,       # larger values mean stronger L2 regularization
    'min_gain_to_split': 0.2,
    'verbose': 5,
    'is_unbalance': True
}

# train
print('Start training...')
gbm = lgb.train(params,
                lgb_train,
                num_boost_round=10000,
                valid_sets=lgb_eval,
                early_stopping_rounds=500)

print('Start predicting...')
preds = gbm.predict(test_x, num_iteration=gbm.best_iteration)  # predicted probabilities

# Export results: binarize the probabilities with a threshold
threshold = 0.5
results = [1 if pred > threshold else 0 for pred in preds]

# Export feature importances
importance = gbm.feature_importance()
names = gbm.feature_name()
with open('./feature_importance.txt', 'w+') as file:
    for index, im in enumerate(importance):
        string = names[index] + ', ' + str(im) + '\n'
        file.write(string)
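roc_auc_score is imported in the template above but never used; before exporting predictions it is worth checking the validation AUC at the best iteration. Below is a minimal sketch of that check plus a submission file, assuming gbm, val_X, val_y and preds from the template, and a hypothetical list of test identifiers test_id aligned with test_x.

# Minimal sketch: score the held-out validation set, then write a submission.
# test_id is a hypothetical id column aligned with test_x.
val_preds = gbm.predict(val_X, num_iteration=gbm.best_iteration)
print('Validation AUC: %.5f' % roc_auc_score(val_y, val_preds))

submission = pd.DataFrame({'id': test_id, 'prob': preds})
submission.to_csv('./submission.csv', index=False)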
")# 导入数据train_x, train_y, test_x = load_data()# 用sklearn.cross_validation进行训练数据集划分,这里训练集和交叉验证集比例为7:3,可以自己根据需要设置X, val_X, y, val_y = train_test_split(train_x,train_y,test_size=0.05,random_state=1,stratify=train_y ## 这里保证分割后y的比例分布与原数据一致)X_train = Xy_train = yX_test = val_Xy_test = val_y# create dataset for lightgbmlgb_train = lgb.Dataset(X_train, y_train)lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)# specify your configurations as a dictparams = {'boosting_type': 'gbdt','objective': 'multiclass','num_class': 9,'metric': 'multi_error','num_leaves': 300,'min_data_in_leaf': 100,'learning_rate': 0.01,'feature_fraction': 0.8,'bagging_fraction': 0.8,'bagging_freq': 5,'lambda_l1': 0.4,'lambda_l2': 0.5,'min_gain_to_split': 0.2,'verbose': 5,'is_unbalance': True}# trainprint('Start training...')gbm = lgb.train(params,lgb_train,num_boost_round=10000,valid_sets=lgb_eval,early_stopping_rounds=500)print('Start predicting...')preds = gbm.predict(test_x, num_iteration=gbm.best_iteration) # 输出的是概率结果# 导出结果for pred in preds:result = prediction = int(np.argmax(pred))# 导出特征重要性importance = gbm.feature_importance()names = gbm.feature_name()with open('./feature_importance.txt', 'w+') as file:for index, im in enumerate(importance):string = names[index] + ', ' + str(im) + 'n'file.write(string)XGBoost1. 二分类import numpy as npimport pandas as pdimport xgboost as xgbimport timefrom sklearn.model_selection import StratifiedKFoldfrom sklearn.model_selection import train_test_splittrain_x, train_y, test_x = load_data()# 构建特征# 用sklearn.cross_validation进行训练数据集划分,这里训练集和交叉验证集比例为7:3,可以自己根据需要设置X, val_X, y, val_y = train_test_split(train_x,train_y,test_size=0.01,random_state=1,stratify=train_y)# xgb矩阵赋值xgb_val = xgb.DMatrix(val_X, label=val_y)xgb_train = xgb.DMatrix(X, label=y)xgb_test = xgb.DMatrix(test_x)# xgboost模型 #####################params = {'booster': 'gbtree',# 'objective': 'multi:softmax', # 多分类的问题、# 'objective': 'multi:softprob', # 多分类概率'objective': 'binary:logistic','eval_metric': 'logloss',# 'num_class': 9, # 类别数,与 multisoftmax 并用'gamma': 0.1, # 用于控制是否后剪枝的参数,越大越保守,一般0.1、0.2这样子。'max_depth': 8, # 构建树的深度,越大越容易过拟合'alpha': 0, # L1正则化系数'lambda': 10, # 控制模型复杂度的权重值的L2正则化项参数,参数越大,模型越不容易过拟合。'subsample': 0.7, # 随机采样训练样本'colsample_bytree': 0.5, # 生成树时进行的列采样'min_child_weight': 3,# 这个参数默认是 1,是每个叶子里面 h 的和至少是多少,对正负样本不均衡时的 0-1 分类而言# ,假设 h 在 0.01 附近,min_child_weight 为 1 意味着叶子节点中最少需要包含 100 个样本。# 这个参数非常影响结果,控制叶子节点中二阶导的和的最小值,该参数值越小,越容易 overfitting。'silent': 0, # 设置成1则没有运行信息输出,最好是设置为0.'eta': 0.03, # 如同学习率'seed': 1000,'nthread': -1, # cpu 线程数'missing': 1,'scale_pos_weight': (np.sum(y==0)/np.sum(y==1)) # 用来处理正负样本不均衡的问题,通常取:sum(negative cases) / sum(positive cases)# 'eval_metric': 'auc'}plst = list(params.items())num_rounds = 2000 # 迭代次数watchlist = [(xgb_train, 'train'), (xgb_val, 'val')]# 交叉验证result = xgb.cv(plst, xgb_train, num_boost_round=200, nfold=4, early_stopping_rounds=200, verbose_eval=True, folds=StratifiedKFold(n_splits=4).split(X, y))# 训练模型并保存# early_stopping_rounds 当设置的迭代次数较大时,early_stopping_rounds 可在一定的迭代次数内准确率没有提升就停止训练model = xgb.train(plst, xgb_train, num_rounds, watchlist, early_stopping_rounds=200)model.save_model('../data/model/xgb.model') # 用于存储训练出的模型preds = model.predict(xgb_test)# 导出结果threshold = 0.5for pred in preds:result = 1 if pred > threshold else 0CatBoost没用过,听老铁说还行Keras1. 
Keras

1. Binary classification

import numpy as np
import pandas as pd
import time
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Dropout
from keras.layers import Dense, Activation
from keras.utils.np_utils import to_categorical
from sklearn.preprocessing import StandardScaler  # feature standardization
from sklearn.preprocessing import Imputer
# Project-specific loaders; replace with your own.
from model.util import load_data as load_data_1
from model.util_combine_train_test import load_data as load_data_2

print("Loading Data ... ")
# Load the data (your own loading routine)
train_x, train_y, test_x = load_data()

# Build the features
X_train = train_x.values
X_test = test_x.values
y = train_y

# Fill missing values with the column mean
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
X_train = imp.fit_transform(X_train)

sc = StandardScaler()
sc.fit(X_train)
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)

model = Sequential()
model.add(Dense(256, input_shape=(X_train.shape[1],)))
model.add(Activation('tanh'))
model.add(Dropout(0.3))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(512))
model.add(Activation('tanh'))
model.add(Dropout(0.3))
model.add(Dense(256))
model.add(Activation('linear'))
model.add(Dense(1))  # must match the output dimension
model.add(Activation('sigmoid'))

# For a binary classification problem
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

epochs = 100
model.fit(X_train, y, epochs=epochs, batch_size=2000, validation_split=0.1, shuffle=True)

# Export results: binarize the predicted probabilities with a threshold
threshold = 0.5
prediction_prob = model.predict(X_test)
predictions = [1 if prob[0] > threshold else 0 for prob in prediction_prob]

2. Multi-class classification

import numpy as np
import pandas as pd
import time
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from keras.models import Sequential
from keras.layers import Dropout
from keras.layers import Dense, Activation
from keras.utils.np_utils import to_categorical
from sklearn.preprocessing import StandardScaler  # feature standardization
from sklearn.preprocessing import Imputer
# Project-specific loaders; replace with your own.
from model.util import load_data as load_data_1
from model.util_combine_train_test import load_data as load_data_2

print("Loading Data ... ")
# Load the data (your own loading routine)
train_x, train_y, test_x = load_data()

# Build the features
X_train = train_x.values
X_test = test_x.values
y = train_y

# Feature processing
sc = StandardScaler()
sc.fit(X_train)
X_train = sc.transform(X_train)
X_test = sc.transform(X_test)
y = to_categorical(y)  # important: multi-class labels must be one-hot encoded

model = Sequential()
model.add(Dense(256, input_shape=(X_train.shape[1],)))
model.add(Activation('tanh'))
model.add(Dropout(0.3))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(512))
model.add(Activation('tanh'))
model.add(Dropout(0.3))
model.add(Dense(256))
model.add(Activation('linear'))
model.add(Dense(9))  # must match the output dimension (number of classes)
model.add(Activation('softmax'))

# For a multi-class classification problem
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

epochs = 200
model.fit(X_train, y, epochs=epochs, batch_size=200, validation_split=0.1, shuffle=True)

# Export results: take the class with the highest probability
prediction_prob = model.predict(X_test)
predictions = np.argmax(prediction_prob, axis=1)
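The Keras templates above train for a fixed number of epochs. A minimal sketch of adding early stopping on the validation split is shown below; the patience value is a placeholder, and restore_best_weights requires a relatively recent Keras release (drop that argument on older versions).

from keras.callbacks import EarlyStopping

# Stop training once the validation loss has not improved for `patience` epochs,
# and roll back to the best weights seen so far.
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

model.fit(X_train, y,
          epochs=epochs,
          batch_size=2000,
          validation_split=0.1,
          shuffle=True,
          callbacks=[early_stop])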
Handling imbalanced positive/negative samples

In some problems the positive and negative sample counts differ enormously and the data are severely unbalanced. Here are a few ways to deal with it.

# Compute the positive/negative ratio
positive_num = df_train[df_train['label'] == 1].values.shape[0]
negative_num = df_train[df_train['label'] == 0].values.shape[0]
print(float(positive_num) / float(negative_num))

Main approaches
1. Manually adjust the positive/negative ratio
2. Over-sampling: over-sample the minority class in the training set and synthesize new samples to relieve the class imbalance, e.g. with the SMOTE algorithm
3. Under-sampling
4. Combine the samples proportionally into several training sets, train multiple weak classifiers on them, and ensemble the results

Recommended framework
A well-written framework on GitHub built specifically for this kind of problem: https://github.com/scikit-learn-contrib/imbalanced-learn (a minimal usage sketch is appended at the end of this post).

Practice is always the ultimate test. Enter more competitions: you will get a feel for tasks from all kinds of business settings and pick up new techniques along the way.
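Appended sketch for the imbalanced-learn recommendation above: a minimal, illustrative use of SMOTE over-sampling on the training split. X_train and train_y are assumed from the templates above; the call is fit_resample in current imbalanced-learn releases (fit_sample in older ones), and random_state is a placeholder.

from collections import Counter
from imblearn.over_sampling import SMOTE

# Minimal sketch: over-sample the minority class of the training split with SMOTE.
print('Before resampling:', Counter(train_y))
smote = SMOTE(random_state=1)
X_res, y_res = smote.fit_resample(X_train, train_y)  # fit_sample(...) in older imbalanced-learn versions
print('After resampling: ', Counter(y_res))

# Train any of the templates above on X_res / y_res instead of X_train / train_y.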
