import operator

import pandas as pd
import xgboost as xgb
from matplotlib import pylab as plt


def create_feature_map(features):
    # Write the feature map file: one "index<TAB>name<TAB>q" line per
    # feature ("q" marks a quantitative feature)
    outfile = open('xgb.fmap', 'w')
    i = 0
    for feat in features:
        outfile.write('{0}\t{1}\tq\n'.format(i, feat))
        i = i + 1
    outfile.close()


def get_data():
    train = pd.read_csv("../input/train.csv")
    features = list(train.columns[2:])
    y_train = train.Hazard
    # Mean-target encode the categorical columns
    for feat in train.select_dtypes(include=['object']).columns:
        m = train.groupby([feat])['Hazard'].mean()
        train[feat].replace(m, inplace=True)
    x_train = train[features]
    return features, x_train, y_train


def get_data2():
    from sklearn.datasets import load_iris
    # Load the iris data
    iris = load_iris()
    x_train = pd.DataFrame(iris.data)
    features = ["sepal_length", "sepal_width", "petal_length", "petal_width"]
    x_train.columns = features
    y_train = pd.DataFrame(iris.target)
    return features, x_train, y_train


# features, x_train, y_train = get_data()
features, x_train, y_train = get_data2()
create_feature_map(features)

xgb_params = {"objective": "reg:linear", "eta": 0.01, "max_depth": 8,
              "seed": 42, "silent": 1}
num_rounds = 1000

dtrain = xgb.DMatrix(x_train, label=y_train)
gbdt = xgb.train(xgb_params, dtrain, num_rounds)

# Per-feature split counts, resolved to names via the feature map file
importance = gbdt.get_fscore(fmap='xgb.fmap')
importance = sorted(importance.items(), key=operator.itemgetter(1))

df = pd.DataFrame(importance, columns=['feature', 'fscore'])
df['fscore'] = df['fscore'] / df['fscore'].sum()  # normalize to relative importance

plt.figure()
df.plot(kind='barh', x='feature', y='fscore', legend=False, figsize=(16, 10))
plt.title('XGBoost Feature Importance')
plt.xlabel('relative importance')
plt.gcf().savefig('feature_importance_xgb.png')
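As a side note, newer xgboost versions also ship a built-in plotting helper that draws much the same bar chart without building the intermediate DataFrame by hand. A minimal sketch reusing the gbdt booster trained above (the output filename here is arbitrary):

# plot_importance reads the trained booster's fscore directly; feature
# names are taken from the DMatrix used for training
ax = xgb.plot_importance(gbdt)
ax.figure.savefig('feature_importance_builtin.png')  # hypothetical filename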
The split feature and split point at each node are chosen according to the gain in the structure score; a feature's importance score here is simply the total number of times it is used as a split feature across all trees.
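Note that this count, returned by get_fscore, is what later xgboost releases call the 'weight' importance type; Booster.get_score exposes two further views, 'gain' and 'cover'. A minimal sketch, reusing the gbdt booster and the xgb.fmap file from above:

# 'weight' = number of times a feature is used to split the data
# 'gain'   = average gain of the splits that use the feature
# 'cover'  = average number of samples affected by those splits
for imp_type in ['weight', 'gain', 'cover']:
    scores = gbdt.get_score(fmap='xgb.fmap', importance_type=imp_type)
    print(imp_type, sorted(scores.items(), key=operator.itemgetter(1), reverse=True))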
Reference: https://blog.csdn.net/q383700092/article/details/53698760
One more thing: I ran into a problem while using xgboost.
Original post: https://www.cnblogs.com/Allen-rg/p/8868109.html