- Show each unique value and its frequency:
df['col'].value_counts()
merchants['category_2'].unique()
merchants.groupby('category_2').size()
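A minimal sketch on a toy frame shows how the three calls differ ('category_2' here is just an illustrative column name):
import pandas as pd
toy = pd.DataFrame({'category_2': ['A', 'B', 'A', None, 'A']})
toy['category_2'].value_counts()              # A: 3, B: 1 (NaN dropped by default)
toy['category_2'].value_counts(dropna=False)  # also counts NaN
toy['category_2'].unique()                    # array(['A', 'B', None], dtype=object)
toy.groupby('category_2').size()              # like value_counts, but sorted by key; NaN dropped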
- Show statistics by group:
Mean of col2 within each group, after grouping by col1:
df.groupby(['col1'])['col2'].mean()
- Show multiple statistics after grouping
Various statistics of col2 and col3 for each group, after grouping by col1:
df.groupby(['col1']).agg({'col2': ['mean', 'std', 'count'], 'col3': ['mean', 'std']})
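A toy run (placeholder column names) makes the shape of the result visible:
import pandas as pd
df = pd.DataFrame({'col1': ['a', 'a', 'b'],
                   'col2': [1.0, 3.0, 5.0],
                   'col3': [2.0, 4.0, 6.0]})
df.groupby(['col1']).agg({'col2': ['mean', 'std', 'count'], 'col3': ['mean', 'std']})
# columns come back as a MultiIndex: (col2, mean), (col2, std), (col2, count), ...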
- Replace categorical variables with numerical values
- binary variable: (replace 'X', 'Y' with 0, 1)
df['col1'] = df['col1'].apply(lambda x: 1 if x == 'Y' else 0)
- categorical variable: (replace 'A', 'B', 'C', 'nan' with 0, 1, 2, 3)
Note: this turns the object column into a plain numeric column; convert it back to categorical afterwards if needed.
map_dict = {'A': 0, 'B': 1, 'C': 2, 'nan': 3}
df['col3'] = df['col3'].apply(lambda x: map_dict[str(x)])
# OR:
# Convert categorical variables:
df['col'] = df['col'].astype('category')
# reassign integer codes for the categorical columns:
cat_columns = df.select_dtypes(['category']).columns
df[cat_columns] = df[cat_columns].apply(lambda x: x.cat.codes)
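One detail worth checking before relying on cat.codes (sketch on a toy Series):
import pandas as pd
s = pd.Series(['A', 'B', None, 'C']).astype('category')
s.cat.codes.tolist()               # [0, 1, -1, 2] — NaN becomes -1, not its own code
dict(enumerate(s.cat.categories))  # {0: 'A', 1: 'B', 2: 'C'} — keep this for decoding later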
- get dummies
# Convert to dummy variables (only object/category columns are encoded)
df = pd.get_dummies(df)                           # default: encode all object/category columns
df = pd.get_dummies(df, columns=['col'])          # encode only this column
df = pd.get_dummies(df, prefix=['col1', 'col2'])  # specify prefixes (one per encoded column)
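A minimal sketch of what get_dummies produces (toy frame; numeric columns pass through untouched):
import pandas as pd
df = pd.DataFrame({'col': ['a', 'b', 'a'], 'num': [1, 2, 3]})
pd.get_dummies(df)
#    num  col_a  col_b     (bool dtype on pandas >= 2.0, 0/1 ints on older versions)
# 0    1   True  False
# 1    2  False   True
# 2    3   True  False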
- Format a percentage:
f"{0.03948576 * 100:.4f}%"  # '3.9486%'
- missing value (check and fill)
merchants['category_2'].value_counts()     # check unique values and their counts
new_tran['category_2']                     # show the column
new_tran[new_tran['category_2'].isnull()]  # show rows with missing values
merchants.info()                           # dtype of each column
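A common companion check (assuming merchants is already loaded) is the per-column missing count:
merchants.isnull().sum()                 # NaN count per column
merchants['category_2'].isnull().mean()  # fraction of rows missing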
- histogram
hists = merchants.hist(column="avg_sales_lag3",
                       xlabelsize=8, ylabelsize=15, range=(9000, 10000))[0][0]  # hist() returns a 2-D array of Axes
hists.set_ylabel("count", size=15)
hists.set_xlabel("avg_sales_lag3", size=15)
hists.set_title("Histogram of avg_sales_lag3", size=15);
- '\n' escape character: line breaks inside print (without extra spaces):
a=234
b=325
c=222
print(a,'\n',b,'\n',c,sep='')
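Equivalently, and shorter, the separator itself can be the newline:
print(a, b, c, sep='\n')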
- Other references:
https://blog.csdn.net/sinat_35512245/article/details/79685891
- standardize / scale data
Standardize (z-score) or min-max scale a column:
import numpy as np

def std(df, column=None):
    mean = np.mean(df[column])
    sd = np.std(df[column])          # population std (ddof=0)
    df[column] = (df[column] - mean) / sd
    print(mean, sd)
    return df[column]

def scaler(df, column=None):
    min_value = np.min(df[column])
    max_value = np.max(df[column])
    df[column] = (df[column] - min_value) / (max_value - min_value)
    return df[column]

ctn_ft = ['month_lag', 'purchase_amount', 'date', 'time']  # continuous features
for feature in ctn_ft:
    df[feature] = std(df, column=feature)
    print(feature)
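Sketch usage of the two helpers on a toy frame (pass a copy if you don't want the original mutated — both functions write back into df[column]):
import pandas as pd
toy = pd.DataFrame({'x': [1.0, 2.0, 3.0]})
std(toy.copy(), column='x')     # result has mean 0, population std 1
scaler(toy.copy(), column='x')  # [0.0, 0.5, 1.0]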
Check against sklearn:
from sklearn.preprocessing import StandardScaler
from math import sqrt

df = ..
ctn = ['avg_sales_lag3', 'avg_purchases_lag3', 'active_months_lag3', 'avg_sales_lag6',
       'avg_purchases_lag6', 'active_months_lag6', 'avg_sales_lag12', 'avg_purchases_lag12',
       'active_months_lag12']
for i in ctn:
    values = df[i].values.reshape(-1, 1)  # StandardScaler expects a 2-D array
    scaler = StandardScaler().fit(values)
    print('col:', i, 'Mean: %f, StandardDeviation: %f' % (scaler.mean_[0], sqrt(scaler.var_[0])))
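If the goal is to actually transform the data rather than just report the stats, sklearn's fit_transform does it in one step — a sketch, reusing the ctn list above:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
df[ctn] = scaler.fit_transform(df[ctn])  # standardizes every listed column at once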
Check for nulls and replace:
df.isnull().any()                  # any NaN in each column?
df[df['col'].isnull()]             # rows where 'col' is missing
df['col'] = df['col'].fillna('Z')  # fill with a constant
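A variant worth knowing: fill with the column's most frequent value instead of a constant:
df['col'] = df['col'].fillna(df['col'].mode()[0])  # mode() returns a Series; [0] takes the top value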
Memory / size
import psutil
print(psutil.virtual_memory())                     # overall system memory stats
import os
os.path.getsize('tran1.csv') / 1024 / 1024 / 1024  # file size in GB
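pandas can also report a frame's own in-memory footprint — a one-liner sketch:
df.memory_usage(deep=True).sum() / 1024 / 1024  # in MB; deep=True counts object (string) contents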