The FM Algorithm
The Factorization Machine (FM) is a machine-learning model built on matrix factorization ideas. It is widely used in advertising and recommender systems, mainly to address how to cross features when the data is sparse.
Advantages of FM
1) FM can still estimate reliable parameters when the data is very sparse.
2) FM has linear time complexity.
3) FM works with any real-valued feature vector; typical inputs mix numerical features with one-hot encoded categorical features. By constraining the input, FM can be specialized into MF, SVD++, PITF, FPMC, and other models, as sketched below.
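For example, if the input contains only a one-hot user ID and a one-hot item ID, FM collapses to matrix factorization with biases (a standard observation from Rendle's FM paper):

$$
\hat{y}(u, i) = w_0 + w_u + w_i + \langle v_u, v_i \rangle
$$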
Disadvantages of FM
1) FM only automates feature crosses below third order (i.e., pairwise interactions), so manual feature engineering still cannot be avoided entirely. (DeepFM was later developed on top of FM to capture higher-order feature interactions.)
FM Derivation
In this section we derive the factorization machine model.
1) The FM formula

$$
\hat{y}(x) = w_0 + \sum_{i=1}^{n} w_i x_i + \sum_{i=1}^{n} \sum_{j=i+1}^{n} \langle v_i, v_j \rangle\, x_i x_j
$$

where $\langle \cdot, \cdot \rangle$ denotes the dot product of two $k$-dimensional vectors:
- $w_0$ is the global bias
- $w_i$ is the weight of variable $x_i$
- $\hat{w}_{ij} := \langle v_i, v_j \rangle$ is the interaction weight between variables $x_i$ and $x_j$
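A direct NumPy rendering of this formula may help (a minimal sketch; fm_predict and its argument shapes are illustrative, and the pairwise part uses the naive double sum that the derivation below simplifies):

import numpy as np

def fm_predict(x, w0, w, V):
    """FM prediction for one sample: x is (n,), w is (n,), V is (n, k)."""
    # Linear part: w0 + sum_i w_i * x_i
    linear = w0 + w @ x
    # Pairwise part: naive O(k * n^2) double sum over i < j
    n = len(x)
    pair = sum(V[i] @ V[j] * x[i] * x[j]
               for i in range(n) for j in range(i + 1, n))
    return linear + pair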
2) Expressiveness of the model
The choice of k determines FM's expressiveness. To make the model generalize better on sparse data, a relatively small k is usually chosen.
3) The full derivation
The relatively hard part is the first transformation: what happens when the inner sum's lower bound changes from j = i + 1 to j = 1?
- Each $x_i x_i$ term is added once more (the diagonal, which the original sum does not contain).
- Each $x_i x_j$ term with $i \neq j$ is added once more (it now appears both as $ij$ and as $ji$).
For example, with three features a, b, c, the original sum is ab + ac + bc, which can be rewritten as ½ (first term − second term), where:
First term: aa + ab + ac + ba + bb + bc + ca + cb + cc
Second term: aa + bb + cc
The final formula is:

$$
\sum_{i=1}^{n} \sum_{j=i+1}^{n} \langle v_i, v_j \rangle\, x_i x_j
= \frac{1}{2} \sum_{f=1}^{k} \left[ \left( \sum_{i=1}^{n} v_{i,f}\, x_i \right)^{2} - \sum_{i=1}^{n} v_{i,f}^{2}\, x_i^{2} \right]
$$

which reduces the pairwise term from $O(kn^2)$ to $O(kn)$.
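A small numerical check of this identity (a sketch; the shapes and random values are made up for illustration):

import numpy as np

rng = np.random.default_rng(0)
n, k = 5, 3                      # 5 features, embedding size 3
x = rng.normal(size=n)           # feature values
V = rng.normal(size=(n, k))      # latent vectors v_i

# Naive O(k * n^2) double sum over i < j
naive = sum(V[i] @ V[j] * x[i] * x[j]
            for i in range(n) for j in range(i + 1, n))

# Simplified O(k * n) form
fast = 0.5 * np.sum((V.T @ x) ** 2 - (V ** 2).T @ (x ** 2))

assert np.isclose(naive, fast)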
Gradient
FM has linear time complexity: both training and prediction can be done in linear time.
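For reference, the standard FM gradients (from Rendle's original paper) are:

$$
\frac{\partial \hat{y}(x)}{\partial \theta} =
\begin{cases}
1, & \text{if } \theta = w_0 \\
x_i, & \text{if } \theta = w_i \\
x_i \sum_{j=1}^{n} v_{j,f}\, x_j - v_{i,f}\, x_i^{2}, & \text{if } \theta = v_{i,f}
\end{cases}
$$

The sum $\sum_{j} v_{j,f}\, x_j$ is independent of $i$, so it can be precomputed once per factor dimension $f$; this is what keeps training linear in the number of features.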
The Network Structure of FM
The DeepFM Algorithm
DeepFM was proposed jointly by Harbin Institute of Technology and Huawei in 2017. After Wide & Deep, it is another dual-component model widely adopted in industry. Compared with Wide & Deep, DeepFM replaces the Wide part with an FM, strengthening the shallow component's ability to combine features.
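In the DeepFM paper, the FM component and the DNN component share the same embedding inputs, and their output logits are summed before a sigmoid:

$$
\hat{y} = \mathrm{sigmoid}(y_{FM} + y_{DNN})
$$

The implementation below follows this structure, with the FM side further split into a first-order linear term and a second-order interaction term.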
DeepFM Code Implementation
import os
import numpy as np
import pandas as pd
from collections import namedtuple
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
##### Data Preprocessing
data = pd.read_csv('./data/criteo_sample.txt')
data.head()
def data_processing(df, dense_features, sparse_features):
    # Fill missing dense values, then log-transform to compress their range
    df[dense_features] = df[dense_features].fillna(0.0)
    for f in dense_features:
        df[f] = df[f].apply(lambda x: np.log(x + 1) if x > -1 else -1)
    # Fill missing sparse values and label-encode each categorical column
    df[sparse_features] = df[sparse_features].fillna("-1")
    for f in sparse_features:
        lbe = LabelEncoder()
        df[f] = lbe.fit_transform(df[f])
    return df[dense_features + sparse_features]
dense_features = [i for i in data.columns.values if 'I' in i]
sparse_features = [i for i in data.columns.values if 'C' in i]
df = data_processing(data, dense_features, sparse_features)
df['label'] = data['label']
##### Model Construction
# Use named tuples to mark the feature columns
SparseFeature = namedtuple('SparseFeature', ['name', 'vocabulary_size', 'embedding_size'])
DenseFeature = namedtuple('DenseFeature', ['name', 'dimension'])
VarLenSparseFeature = namedtuple('VarLenSparseFeature', ['name', 'vocabulary_size', 'embedding_size', 'maxlen'])
class FM_Layer(Layer):
    """Second-order FM interaction over a stack of field embeddings."""
    def __init__(self, **kwargs):
        super(FM_Layer, self).__init__(**kwargs)

    def call(self, inputs):
        # inputs: (batch_size, num_fields, embedding_size)
        concate_embed_values = inputs
        # (sum of embeddings)^2 -> (batch_size, 1, embedding_size)
        square_of_sum = tf.square(tf.reduce_sum(concate_embed_values, axis=1, keepdims=True))
        # sum of squared embeddings, same shape
        sum_of_square = tf.reduce_sum(concate_embed_values * concate_embed_values, axis=1, keepdims=True)
        output = square_of_sum - sum_of_square
        # 0.5 * sum over the embedding dimension -> (batch_size, 1)
        output = 0.5 * tf.reduce_sum(output, axis=2, keepdims=False)
        return output

    def compute_output_shape(self, input_shape):
        return (None, 1)
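# A hypothetical shape check for FM_Layer (not part of the original pipeline):
# a batch of 2 samples with 3 sparse fields and embedding size 4 should
# yield one interaction logit per sample, i.e. shape (2, 1).
assert FM_Layer()(tf.random.normal((2, 3, 4))).shape == (2, 1)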
def build_input_layers(feature_columns):
    """Build Keras Input layers for dense and sparse features."""
    dense_input_dict, sparse_input_dict = {}, {}
    for f in feature_columns:
        if isinstance(f, DenseFeature):
            dense_input_dict[f.name] = Input(shape=(f.dimension, ), name=f.name)
        elif isinstance(f, SparseFeature):
            sparse_input_dict[f.name] = Input(shape=(1, ), name=f.name)
    return dense_input_dict, sparse_input_dict
def build_embedding_layers(feature_columns, is_linear):
    embedding_layers_dict = {}
    # Keep only the sparse feature columns
    sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeature), feature_columns)) if feature_columns else []
    if is_linear:
        # 1-d embeddings act as per-category weights for the linear part
        for f in sparse_feature_columns:
            embedding_layers_dict[f.name] = Embedding(f.vocabulary_size + 1, 1, name='1d_emb_' + f.name)
    else:
        # k-d embeddings shared by the FM and DNN parts
        for f in sparse_feature_columns:
            embedding_layers_dict[f.name] = Embedding(f.vocabulary_size + 1, f.embedding_size, name='kd_emb_' + f.name)
    return embedding_layers_dict
def concat_embedding_list(feature_columns, input_layer_dict, embedding_layer_dict, flatten=False):
    """Look up and collect the embeddings of all sparse features."""
    sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeature), feature_columns)) if feature_columns else []
    embedding_list = []
    for f in sparse_feature_columns:
        _input_layer = input_layer_dict[f.name]
        _embed = embedding_layer_dict[f.name]
        embed_layer = _embed(_input_layer)
        if flatten:
            embed_layer = Flatten()(embed_layer)
        embedding_list.append(embed_layer)
    return embedding_list
def get_linear_logits(dense_input_dict, sparse_input_dict, sparse_feature_columns):
    # Dense part: one Dense(1) over all concatenated dense inputs
    concat_dense_inputs = Concatenate(axis=1)(list(dense_input_dict.values()))
    dense_logits_output = Dense(1)(concat_dense_inputs)
    # Sparse part: 1-d embeddings serve as per-category linear weights
    linear_embedding_layer = build_embedding_layers(sparse_feature_columns, is_linear=True)
    sparse_1d_embed_list = []
    for f in sparse_feature_columns:
        temp_input = sparse_input_dict[f.name]
        temp_embed = Flatten()(linear_embedding_layer[f.name](temp_input))
        sparse_1d_embed_list.append(temp_embed)
    sparse_logits_output = Add()(sparse_1d_embed_list)
    linear_logits = Add()([dense_logits_output, sparse_logits_output])
    return linear_logits
def get_fm_logits(sparse_input_dict, sparse_feature_columns, dnn_embedding_layers):
    # Stack the k-d embeddings into (batch_size, num_fields, k) and apply FM
    sparse_kd_embed_list = []
    for f in sparse_feature_columns:
        f_input = sparse_input_dict[f.name]
        _embed = dnn_embedding_layers[f.name](f_input)
        sparse_kd_embed_list.append(_embed)
    concat_sparse_kd_embed_list = Concatenate(axis=1)(sparse_kd_embed_list)
    fm_logits = FM_Layer()(concat_sparse_kd_embed_list)
    return fm_logits
def get_dnn_logits(sparse_input_dict, sparse_feature_columns, dnn_embedding_layers):
    sparse_kd_embed = concat_embedding_list(sparse_feature_columns, sparse_input_dict, dnn_embedding_layers, flatten=True)
    concat_sparse_kd_embed = Concatenate(axis=1)(sparse_kd_embed)
    # DNN tower
    dnn_out = Dropout(0.5)(Dense(1024, activation='relu')(concat_sparse_kd_embed))
    dnn_out = Dropout(0.5)(Dense(512, activation='relu')(dnn_out))
    dnn_out = Dropout(0.5)(Dense(256, activation='relu')(dnn_out))
    dnn_logits = Dense(1)(dnn_out)
    return dnn_logits
def DeepFm(linear_feature_columns, dnn_feature_columns):
    dense_input_dict, sparse_input_dict = build_input_layers(linear_feature_columns + dnn_feature_columns)
    # Linear (first-order) part
    linear_sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeature), linear_feature_columns))
    input_layers = list(dense_input_dict.values()) + list(sparse_input_dict.values())
    linear_logits = get_linear_logits(dense_input_dict, sparse_input_dict, linear_sparse_feature_columns)
    # DNN part (shares the k-d embeddings with the FM part)
    dnn_embedding_layers = build_embedding_layers(dnn_feature_columns, is_linear=False)
    dnn_sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeature), dnn_feature_columns))
    dnn_logits = get_dnn_logits(sparse_input_dict, dnn_sparse_feature_columns, dnn_embedding_layers)
    # FM (second-order) part
    fm_logits = get_fm_logits(sparse_input_dict, dnn_sparse_feature_columns, dnn_embedding_layers)
    # Sum the three logits, then apply a sigmoid
    output_logits = Add()([linear_logits, dnn_logits, fm_logits])
    output_layer = Activation("sigmoid")(output_logits)
    model = Model(input_layers, output_layer)
    return model
# Define the feature columns
linear_feature_columns = [SparseFeature(f, vocabulary_size=df[f].nunique(), embedding_size=4) for f in sparse_features] + \
                         [DenseFeature(f, 1) for f in dense_features]
dnn_feature_columns = [SparseFeature(f, vocabulary_size=df[f].nunique(), embedding_size=4) for f in sparse_features] + \
                      [DenseFeature(f, 1) for f in dense_features]
model = DeepFm(linear_feature_columns, dnn_feature_columns)
model.summary()
##### Model Training
model.compile(optimizer="adam",
              loss="binary_crossentropy",
              metrics=["binary_crossentropy", tf.keras.metrics.AUC(name='auc')])
train_input = {col: df[col] for col in dense_features + sparse_features}
model.fit(train_input, df['label'].values,
          batch_size=64, epochs=5, validation_split=0.2)
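# A minimal inference sketch (assumes inputs preprocessed the same way as in
# training; here we simply reuse train_input for illustration):
preds = model.predict(train_input, batch_size=64)
print(preds[:5])  # predicted click probabilities in [0, 1]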