
Data Visualization

# Data inspection (visualization): compare the train vs. test distribution of each column
import seaborn as sns
import matplotlib.pyplot as plt

# all_data is the concatenation of train and test, so its columns exist in both
for col in all_data.columns:
    sns.distplot(train[col])
    sns.distplot(test[col])
    plt.show()
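
Note that distplot is deprecated since seaborn 0.11; a minimal equivalent sketch using kdeplot, which also labels the two overlaid distributions:

# distplot is deprecated in seaborn 0.11+; kdeplot gives the same comparison
for col in all_data.columns:
    sns.kdeplot(train[col], label='train')
    sns.kdeplot(test[col], label='test')
    plt.legend()
    plt.show()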

Preprocessing Data with pandas

1. Drop columns where more than 90% of the values are missing (or where a single value dominates)
2. Remove outlier rows
3. Drop low-variance features

# For each column, check the missing-value percentage and the share of the most
# frequent category; if one category takes up too large a share, the column
# carries little information and can be dropped.
import pandas as pd

stats = []
for col in train.columns:
    stats.append((col,
                  train[col].nunique(),
                  train[col].isnull().sum() * 100 / train.shape[0],
                  train[col].value_counts(normalize=True, dropna=False).values[0] * 100,
                  train[col].dtype))

stats_df = pd.DataFrame(stats, columns=['Feature', 'Unique_values',
                                        'Percentage of missing values',
                                        'Percentage of values in the biggest category', 'type'])
stats_df.sort_values('Percentage of missing values', ascending=False)[:10]

# Drop columns where the most frequent value (including NaN) covers more than 90% of rows
good_cols = list(train.columns)
for col in train.columns:
    rate = train[col].value_counts(normalize=True, dropna=False).values[0]
    if rate > 0.9:
        good_cols.remove(col)

# Remove outlier rows: keep only samples whose yield ('收率') is above 0.87
train = train[train['收率'] > 0.87]
train.reset_index(drop=True, inplace=True)
train = train[good_cols]
good_cols.remove('收率')  # the test set has no target column
test = test[good_cols]

# Drop low-variance features: columns that barely change contribute little to the model
from sklearn.feature_selection import VarianceThreshold

VT = VarianceThreshold(threshold=0.5)
input_data = pd.DataFrame(VT.fit_transform(input_data))
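
One caveat: fit_transform returns a bare NumPy array, so the column names are lost. A small sketch that restores them via the selector's get_support() mask, assuming input_data is still a DataFrame when fitting:

# Keep the names of the surviving columns
VT = VarianceThreshold(threshold=0.5)
values = VT.fit_transform(input_data)
kept_cols = input_data.columns[VT.get_support()]  # boolean mask of retained features
input_data = pd.DataFrame(values, columns=kept_cols)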

Data Standardization

from sklearn import preprocessing

# Scale all features to [0, 1], then standardize to zero mean and unit variance
min_max_scaler = preprocessing.MinMaxScaler()
data_minmax = pd.DataFrame(min_max_scaler.fit_transform(all_data), columns=all_data.columns)
X_scaled = pd.DataFrame(preprocessing.scale(data_minmax), columns=data_minmax.columns)
train_x = X_scaled.iloc[:len(train)]  # .ix is removed in modern pandas; use .iloc
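
Because all_data stacks train on top of test, the same positional split recovers the scaled test features; a one-line sketch under that assumption:

# The remaining rows belong to the test set
test_x = X_scaled.iloc[len(train):].reset_index(drop=True)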

Handling Missing and Infinite Values

Locate inf and NaN values in the data and replace them.

import numpy as np

# Show the rows that contain NaN or inf values
data[data.isin([np.nan, np.inf, -np.inf]).any(axis=1)]
# Replace inf with NaN
data = data.replace([np.inf, -np.inf], np.nan)
# Fill remaining NaNs with a sentinel value
data = data.fillna(-1)
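
Filling with -1 treats missingness as its own signal, which works well for tree models; for linear models a statistic-based fill is often safer. A hedged alternative using scikit-learn's SimpleImputer (the median strategy here is my choice, not from the original):

from sklearn.impute import SimpleImputer

# Replace NaNs with each column's median instead of a sentinel
imputer = SimpleImputer(strategy='median')
data = pd.DataFrame(imputer.fit_transform(data), columns=data.columns)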

Data Analysis

1. Inspect the correlation coefficient between each column and the target

import seaborn as sns

train_data['target'] = target  # append the label to train_data
corrmat = train_data.corr()  # correlation matrix
top_corr_features = corrmat.index[abs(corrmat["target"]) > 0.5]  # features with |corr| > 0.5
# Plot the correlation heatmap
plt.figure(figsize=(10, 10))
g = sns.heatmap(train_data[top_corr_features].corr(), annot=True, cmap="RdYlGn")

# Pairwise scatter plots of the highly correlated columns
train_data = train_data.fillna(-1)  # NaNs must be handled first, or plotting fails
sns.set()
cols = list(top_corr_features)  # columns with |corr| > 0.5
sns.pairplot(train_data[cols], height=2.5)  # 'size' was renamed to 'height' in seaborn 0.9
plt.show()

# Inspect the distribution of the target
from scipy import stats
from scipy.stats import norm, skew  # for some statistics

# A normal distribution with mu = 0 and sigma = 1 is the standard normal
sns.distplot(train_data['target'], fit=norm)
# Get the fitted parameters used by the function
(mu, sigma) = norm.fit(train_data['target'])
print('\n mu = {:.2f} and sigma = {:.2f}\n'.format(mu, sigma))
plt.legend([r'Normal dist. ($\mu=$ {:.2f} and $\sigma=$ {:.2f} )'.format(mu, sigma)],
           loc='best')
plt.ylabel('Frequency')
plt.title('target distribution')

# Q-Q plot against the normal distribution
fig = plt.figure()
res = stats.probplot(train_data['target'], plot=plt)
plt.show()
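
If the Q-Q plot shows the target is skewed, a log transform usually brings it closer to normal, which is consistent with the np.log(target) used in the feature-selection step below. A minimal sketch:

import numpy as np

# Compare skewness before and after a log1p transform
print('skew before:', skew(train_data['target']))
print('skew after :', skew(np.log1p(train_data['target'])))
sns.distplot(np.log1p(train_data['target']), fit=norm)
plt.show()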

Categorical Encoding

If a color feature with values red, yellow, blue is label-encoded as 1, 2, 3, the implied distances between categories become unequal: red-to-blue (|1 - 3| = 2) appears farther apart than red-to-yellow (|1 - 2| = 1), even though in reality all categories are equidistant. After one-hot encoding (001, 010, 100), every pair of categories is the same distance apart.
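
A quick sketch of the difference with pandas (the color column here is a made-up example):

import pandas as pd

colors = pd.Series(['red', 'yellow', 'blue'])
# Label encoding: red-to-blue distance (2) > red-to-yellow distance (1)
print(colors.map({'red': 1, 'yellow': 2, 'blue': 3}))
# One-hot encoding: every pair of categories is equally far apart
print(pd.get_dummies(colors))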

# Re-encode each categorical column as consecutive integers
for f in cate_columns:
    data[f] = data[f].map(dict(zip(data[f].unique(), range(data[f].nunique()))))
    
# Bin the target into 5 buckets and one-hot the bucket index
train['target'] = target
train['intTarget'] = pd.cut(train['target'], 5, labels=False)
train = pd.get_dummies(train, columns=['intTarget'])
li = ['intTarget_0', 'intTarget_1', 'intTarget_2', 'intTarget_3', 'intTarget_4']

# For each categorical column, measure each category's average membership in the
# five target buckets (a simple target-mean encoding)
mean_columns = []
for f1 in cate_columns:
    for f2 in li:
        col_name = f1 + "_" + f2 + '_mean'
        mean_columns.append(col_name)
        order_label = train.groupby([f1])[f2].mean()
        for df in [train, test]:
            df[col_name] = df[f1].map(order_label)

train.drop(li, axis=1, inplace=True)

# One-hot encode the categorical columns and stack them onto the numeric matrix
from scipy import sparse
from sklearn.preprocessing import OneHotEncoder

X_train = train[mean_columns + numerical_columns].values
X_test = test[mean_columns + numerical_columns].values
# one hot
enc = OneHotEncoder()
# categorical_columns: the columns that need one-hot encoding
for f in categorical_columns:
    enc.fit(data[f].values.reshape(-1, 1))
    X_train = sparse.hstack((X_train, enc.transform(train[f].values.reshape(-1, 1))), 'csr')
    X_test = sparse.hstack((X_test, enc.transform(test[f].values.reshape(-1, 1))), 'csr')
print(X_train.shape)
print(X_test.shape)

Feature Selection

import numpy as np
from sklearn.preprocessing import RobustScaler, StandardScaler

# RobustScaler uses the median and IQR, so it is less sensitive to outliers
scaler = RobustScaler()

X_scaled = scaler.fit(train_data).transform(train_data)
y_log = np.log(target)  # log-transform the target (see the distribution check above)
test_X_scaled = scaler.transform(test_data)

from sklearn.linear_model import Lasso

# The L1 penalty drives uninformative coefficients to exactly zero
lasso = Lasso(alpha=0.001)
lasso.fit(X_scaled, y_log)

FI_lasso = pd.DataFrame({"Feature Importance": lasso.coef_}, index=train_data.columns)
FI_lasso.sort_values("Feature Importance", ascending=False)
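
To turn the ranking into an actual selection, one simple option is to keep only the features whose Lasso coefficient is nonzero; a minimal sketch reusing the names above:

# Keep the features the L1 penalty did not zero out
selected = FI_lasso[FI_lasso["Feature Importance"] != 0].index.tolist()
print(len(selected), 'features selected')
X_selected = train_data[selected]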