微软于2016年提出了DeepCrossing模型。DeepCrossing完整解决了特征工程、稀疏特征稠密化、多层神经网络优化目标等应用问题,为以后的深度推荐系统打下了良好的基础。
DeepCrossing网络结构
DeepCrossing的网络设计要解决如下问题:
- 稀疏特征向量无法输入神经网络,稀疏特征向量稠密化问题。
- 如何学习到交叉特征的问题。
- 如何在输出层完成优化目标的问题。
解决方法:
- 稀疏特征输入Input层后进入Embedding层获取稠密嵌入向量。
- 通过多层残差网络对特征向量的各个维度进行充分交叉组合,模型抓取到更多的非线性特征和组合特征信息。
- CTR预估这种二分类问题,Score层采用sigmoid;多分类问题,Score层采用Softmax。
相关代码
import os
import numpy as np
import pandas as pd
from collections import namedtuple
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
# Named tuples that tag each feature column with its metadata:
#   SparseFeature       — categorical column: vocab size + embedding width.
#   DenseFeature        — numeric column: its dimensionality.
#   VarLenSparseFeature — variable-length categorical column (declared but
#                         not used by the DeepCrossing model below).
SparseFeature = namedtuple('SparseFeature', ['name', 'vocabulary_size', 'embedding_size'])
DenseFeature = namedtuple('DenseFeature', ['name', 'dimension'])
VarLenSparseFeature = namedtuple('VarLenSparseFeature', ['name', 'vocabulary_size', 'embedding_size', 'maxlen'])
##### Data preprocessing
# Load the Criteo sample dataset (columns: label, I1..I13 dense, C1..C26 sparse
# — presumably; verify against the actual file).
data = pd.read_csv('./data/criteo_sample.txt')
data.head()  # notebook-style preview of the first rows
def data_processing(df, dense_features, sparse_features):
    """Clean and encode the raw feature columns.

    Dense columns: missing values become 0.0, then a smoothed log
    transform log(x + 1) is applied (values <= -1 are mapped to -1).
    Sparse columns: missing values become the sentinel string "-1",
    then each column is label-encoded to integer ids.

    Returns a DataFrame holding only dense_features + sparse_features.
    """
    # Dense side: fill, then log-compress the heavy-tailed counts.
    df[dense_features] = df[dense_features].fillna(0.0)
    for col in dense_features:
        df[col] = df[col].apply(lambda v: np.log(v + 1) if v > -1 else -1)
    # Sparse side: fill with a sentinel category, then map to integer codes.
    df[sparse_features] = df[sparse_features].fillna("-1")
    for col in sparse_features:
        encoder = LabelEncoder()
        df[col] = encoder.fit_transform(df[col])
    return df[dense_features + sparse_features]
# Split column names by Criteo convention: 'I*' = dense, 'C*' = sparse.
dense_features = [i for i in data.columns.values if 'I' in i]
sparse_features = [i for i in data.columns.values if 'C' in i]
df = data_processing(data, dense_features, sparse_features)
# data_processing returns only the feature columns; re-attach the label.
# NOTE(review): df is a column-selection of data, so this assignment may
# emit a pandas SettingWithCopyWarning — confirm intended.
df['label'] = data['label']
##### 模型构建
def build_input_layers(feature_columns):
    """Create one Keras Input per feature column.

    Returns a pair of dicts keyed by feature name:
      - dense inputs with shape (dimension,)
      - sparse inputs with shape (1,) holding a single integer id
    """
    dense_inputs, sparse_inputs = {}, {}
    for fc in feature_columns:
        if isinstance(fc, DenseFeature):
            dense_inputs[fc.name] = Input(shape=(fc.dimension, ), name=fc.name)
        elif isinstance(fc, SparseFeature):
            sparse_inputs[fc.name] = Input(shape=(1, ), name=fc.name)
    return dense_inputs, sparse_inputs
def build_embedding_layers(feature_columns):
    """Build one Embedding layer per sparse feature, keyed by name.

    vocabulary_size + 1 reserves an extra row (e.g. for padding /
    out-of-vocabulary ids).
    """
    sparse_columns = [fc for fc in (feature_columns or [])
                      if isinstance(fc, SparseFeature)]
    return {
        fc.name: Embedding(fc.vocabulary_size + 1, fc.embedding_size,
                           name='emb_' + fc.name)
        for fc in sparse_columns
    }
def concat_embedding_list(feature_columns, input_layer_dict, embedding_layer_dict, flatten=False):
    """Apply each sparse feature's embedding layer to its input tensor.

    Returns the embedded tensors as a list, flattening each to 2-D when
    `flatten` is True (needed before a Concatenate over axis 1).
    """
    sparse_columns = [fc for fc in (feature_columns or [])
                      if isinstance(fc, SparseFeature)]
    embeddings = []
    for fc in sparse_columns:
        embedded = embedding_layer_dict[fc.name](input_layer_dict[fc.name])
        if flatten:
            embedded = Flatten()(embedded)
        embeddings.append(embedded)
    return embeddings
def get_dnn_logits(dnn_inputs, block_nums=3):
    """Run the joint feature vector through `block_nums` residual blocks
    and project to a single sigmoid score (binary CTR head)."""
    hidden = dnn_inputs
    for _ in range(block_nums):
        hidden = ResidualBlock(64)(hidden)
    # Single sigmoid unit: outputs the click probability.
    return Dense(1, activation='sigmoid')(hidden)
class ResidualBlock(Layer):
    """DeepCrossing residual unit: two Dense layers plus a skip connection.

    The second Dense projects back to the input width so the element-wise
    addition with the shortcut is well defined.
    """

    def __init__(self, units):
        super(ResidualBlock, self).__init__()
        self.units = units  # width of the hidden (first) Dense layer

    def build(self, input_shape):
        # Second layer must restore the input width for `x + inputs`.
        last_dim = input_shape[-1]
        self.dnn1 = Dense(self.units, activation='relu')
        self.dnn2 = Dense(last_dim, activation='relu')

    def call(self, inputs):
        transformed = self.dnn2(self.dnn1(inputs))
        # ReLU applied after adding the shortcut branch.
        return Activation('relu')(transformed + inputs)
def DeepCrossing(input_feature_columns):
    """Assemble the DeepCrossing model (Shan et al., Microsoft 2016).

    Dense features are concatenated as-is; sparse features are embedded,
    flattened and concatenated; the joint vector then passes through a
    stack of residual blocks ending in a single sigmoid score.

    Args:
        input_feature_columns: list of DenseFeature / SparseFeature tuples.
    Returns:
        An uncompiled tf.keras Model mapping the feature inputs to the score.
    """
    dense_input_dict, sparse_input_dict = build_input_layers(input_feature_columns)
    input_layers_list = list(dense_input_dict.values()) + list(sparse_input_dict.values())
    embedding_layer_dict = build_embedding_layers(input_feature_columns)
    # Concatenate the raw dense inputs into one vector.
    dense_inputs = Concatenate(axis=1)(list(dense_input_dict.values()))
    # Embed + flatten each sparse input, then concatenate the embeddings.
    sparse_list = concat_embedding_list(input_feature_columns, sparse_input_dict,
                                        embedding_layer_dict, flatten=True)
    sparse_inputs = Concatenate(axis=1)(sparse_list)
    # Joint dense + sparse representation fed to the residual DNN stack.
    # (Removed a leftover debug print of the intermediate tensors here.)
    dnn_inputs = Concatenate(axis=1)([dense_inputs, sparse_inputs])
    output_layers = get_dnn_logits(dnn_inputs, block_nums=3)
    return Model(input_layers_list, output_layers)
# Define the feature columns: every sparse column gets a 4-dim embedding
# sized by its number of distinct values; every dense column is 1-dim.
input_feature_columns = [SparseFeature(f, vocabulary_size=df[f].nunique(), embedding_size=4) for f in sparse_features] + \
[DenseFeature(f, 1) for f in dense_features]
input_feature_columns  # notebook-style echo of the column definitions
model = DeepCrossing(input_feature_columns)
model.summary()
##### Model training
# Binary cross-entropy loss with AUC tracked — the standard CTR setup.
model.compile(optimizer="adam",
loss="binary_crossentropy",
metrics=["binary_crossentropy", tf.keras.metrics.AUC(name='auc')])
# Feed each feature column as a separate named input, matching the
# Input layer names created in build_input_layers.
train_input = {col: df[col] for col in dense_features + sparse_features}
model.fit(train_input, df['label'].values,
batch_size=64, epochs=5, validation_split=0.2)