NFM: Principle and Code Implementation

Principle

FM is limited to second-order feature interactions. Can the stronger feature-combination capacity of a deep neural network be used to improve FM? Based on this idea, researchers at the National University of Singapore proposed the NFM model in 2017.
The main idea of NFM is to replace the second-order inner-product term of FM with a more expressive function.
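Concretely, NFM keeps FM's first-order linear terms and replaces the pairwise interaction term with a function f(x) learned by a neural network. In the notation of the original paper, the prediction is

$$\hat{y}_{NFM}(\mathbf{x}) = w_0 + \sum_{i=1}^{n} w_i x_i + f(\mathbf{x})$$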


How is this more expressive function realized?
The model inserts a Bi-Interaction Pooling layer between the Embedding layer and the DNN. This layer takes the element-wise product of every pair of embedding vectors (multiplying the corresponding dimensions of the two vectors), then sums the resulting interaction vectors element-wise, producing a single k-dimensional vector. That k-dimensional vector is fed into the DNN to extract higher-order nonlinear features.
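Written out (notation from the paper, with $v_i$ the embedding of feature $i$), Bi-Interaction pooling is

$$f_{BI}(\mathcal{V}_x) = \sum_{i=1}^{n}\sum_{j=i+1}^{n} x_i v_i \odot x_j v_j$$

where $\odot$ denotes the element-wise product. As with FM's second-order term, it can be rewritten to run in linear time:

$$f_{BI}(\mathcal{V}_x) = \frac{1}{2}\Big[\big(\textstyle\sum_{i=1}^{n} x_i v_i\big)^2 - \sum_{i=1}^{n} (x_i v_i)^2\Big]$$

where the square is taken element-wise; this is exactly the "0.5 * (square of sum - sum of squares)" trick implemented in the code below.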

Model Structure

From a model-structure perspective, compared with the Wide & Deep model, NFM adds a feature-interaction pooling layer to the Deep part, strengthening the model's ability to cross features.


[figure: NFM model architecture]

Code

import numpy as np
import pandas as pd
from collections import namedtuple

import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from sklearn.preprocessing import LabelEncoder

##### Data preprocessing
data = pd.read_csv('./data/criteo_sample.txt')

def data_processing(df, dense_features, sparse_features):
    # Dense features: fill missing values, then log-transform to compress the range
    df[dense_features] = df[dense_features].fillna(0.0)
    for f in dense_features:
        df[f] = df[f].apply(lambda x: np.log(x + 1) if x > -1 else -1)
    
    # Sparse features: fill missing values, then label-encode each category to an integer id
    df[sparse_features] = df[sparse_features].fillna("-1")
    for f in sparse_features:
        lbe = LabelEncoder()
        df[f] = lbe.fit_transform(df[f])
    return df[dense_features + sparse_features]

# In the Criteo dataset, columns I1..I13 are dense (numeric) and C1..C26 are sparse (categorical)
dense_features = [i for i in data.columns.values if 'I' in i]
sparse_features = [i for i in data.columns.values if 'C' in i]
df = data_processing(data, dense_features, sparse_features)
df['label'] = data['label']

##### Model construction
# Use namedtuples to describe the feature columns
SparseFeature = namedtuple('SparseFeature', ['name', 'vocabulary_size', 'embedding_size'])
DenseFeature = namedtuple('DenseFeature', ['name', 'dimension'])
VarLenSparseFeature = namedtuple('VarLenSparseFeature', ['name', 'vocabulary_size', 'embedding_size', 'maxlen'])

class BiInteractionPooling(Layer):
    def call(self, inputs):
        """ 0.5 * (square of the sum - sum of the squares), computed in linear time """
        concat_embed_values = inputs # B x n x k
        square_of_sum = tf.square(tf.reduce_sum(concat_embed_values, axis=1)) # B x k
        sum_of_square = tf.reduce_sum(concat_embed_values * concat_embed_values, axis=1) # B x k
        output = 0.5 * (square_of_sum - sum_of_square) # B x k
        return output
    
    def compute_output_shape(self, input_shape):
        # Note: keepdims is not used above, so the output really is (B, k),
        # matching this declared output shape and the downstream Dense layers
        return (None, input_shape[2])
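# Illustrative sanity check (not part of the original script): the pooled output
# should equal the explicit sum of pairwise element-wise products of the embeddings.
_test_embeds = tf.random.normal((2, 4, 8))  # B=2, n=4, k=8
_pairwise = tf.add_n([_test_embeds[:, i] * _test_embeds[:, j]
                      for i in range(4) for j in range(i + 1, 4)])
assert np.allclose(BiInteractionPooling()(_test_embeds).numpy(), _pairwise.numpy(), atol=1e-4)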

def build_input_layers(feature_columns):
    """ 构建输入层 """
    dense_input_dict, sparse_input_dict = {}, {}
    for f in feature_columns:
        if isinstance(f, DenseFeature):
            dense_input_dict[f.name] = Input(shape=(f.dimension, ), name=f.name)
        elif isinstance(f, SparseFeature):
            sparse_input_dict[f.name] = Input(shape=(1, ), name=f.name)
    return dense_input_dict, sparse_input_dict

def build_embedding_layers(feature_columns, is_linear):
    embedding_layers_dict = {}
    # Keep only the sparse feature columns
    sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeature), feature_columns)) if feature_columns else []
    if is_linear:
        # 1-d embeddings: a scalar weight per category, used by the linear part
        for f in sparse_feature_columns:
            embedding_layers_dict[f.name] = Embedding(f.vocabulary_size + 1, 1, name='1d_emb_' + f.name)
    else:
        # k-d embeddings: dense vectors fed to Bi-Interaction pooling and the DNN
        for f in sparse_feature_columns:
            embedding_layers_dict[f.name] = Embedding(f.vocabulary_size + 1, f.embedding_size, name='kd_emb_' + f.name)
    return embedding_layers_dict

def concat_embedding_list(feature_columns, input_layer_dict, embedding_layer_dict, flatten=False):
    """ 拼接embedding特征 """
    sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeature), feature_columns)) if feature_columns else []
    embedding_list = []
    for f in sparse_feature_columns:
        _input_layer = input_layer_dict[f.name] 
        _embed = embedding_layer_dict[f.name]
        embed_layer = _embed(_input_layer)
        if flatten:
            embed_layer = Flatten()(embed_layer)
        
        embedding_list.append(embed_layer)
    return embedding_list

def get_linear_logits(dense_input_dict, sparse_input_dict, sparse_feature_columns):
    """ First-order linear part: Dense(1) over the dense features plus 1-d embeddings of the sparse features """
    concat_dense_inputs = Concatenate(axis=1)(list(dense_input_dict.values()))
    dense_logits_output = Dense(1)(concat_dense_inputs)
    
    linear_embedding_layer = build_embedding_layers(sparse_feature_columns, is_linear=True)
    sparse_1d_embed_list = []
    for f in sparse_feature_columns:
        temp_input = sparse_input_dict[f.name]
        temp_embed = Flatten()(linear_embedding_layer[f.name](temp_input))
        sparse_1d_embed_list.append(temp_embed)
    
    sparse_logits_output = Add()(sparse_1d_embed_list)
    linear_logits = Add()([dense_logits_output, sparse_logits_output])
    return linear_logits
    
def get_bi_interaction_pooling_layer(sparse_input_dict, sparse_feature_columns, dnn_embedding_layers):
    """ Second-order interactions of the sparse features via Bi-Interaction pooling """
    sparse_kd_embed_list = []
    for f in sparse_feature_columns:
        f_input = sparse_input_dict[f.name]
        _embed = dnn_embedding_layers[f.name](f_input)
        sparse_kd_embed_list.append(_embed)
    
    concat_sparse_kd_embed_list = Concatenate(axis=1)(sparse_kd_embed_list) # B x n x k
    pooling_out = BiInteractionPooling()(concat_sparse_kd_embed_list) # B x k
    return pooling_out
    
def get_dnn_logits(pooling_out):
    # DNN tower: three fully connected layers with dropout, then a 1-d logit
    dnn_out = Dropout(0.5)(Dense(1024, activation='relu')(pooling_out))
    dnn_out = Dropout(0.5)(Dense(512, activation='relu')(dnn_out))
    dnn_out = Dropout(0.5)(Dense(256, activation='relu')(dnn_out))
    dnn_logits = Dense(1)(dnn_out)
    return dnn_logits

def NFM(linear_feature_columns, dnn_feature_columns):
    dense_input_dict, sparse_input_dict = build_input_layers(linear_feature_columns + dnn_feature_columns)
    
    # First-order linear part
    linear_sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeature), linear_feature_columns))
    input_layers = list(dense_input_dict.values()) + list(sparse_input_dict.values())
    linear_logits = get_linear_logits(dense_input_dict, sparse_input_dict, linear_sparse_feature_columns)
    
    # k-d embeddings followed by Bi-Interaction pooling
    dnn_embedding_layers = build_embedding_layers(dnn_feature_columns, is_linear=False)
    dnn_sparse_feature_columns = list(filter(lambda x: isinstance(x, SparseFeature), dnn_feature_columns))
    
    pooling_out = get_bi_interaction_pooling_layer(sparse_input_dict, dnn_sparse_feature_columns, dnn_embedding_layers)
    # Batch-normalize the pooled vector before feeding the DNN (as in the paper)
    pooling_out = BatchNormalization()(pooling_out)
    
    # DNN
    dnn_logits = get_dnn_logits(pooling_out)
    
    output_logits = Add()([linear_logits, dnn_logits])
    output_layer = Activation("sigmoid")(output_logits)
    model = Model(input_layers, output_layer)
    return model
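# Shape flow through NFM():
#   sparse inputs (B, 1) -> k-d embeddings (B, 1, k) -> concat (B, n, k)
#   -> Bi-Interaction pooling (B, k) -> BN -> DNN -> dnn_logits (B, 1)
#   linear_logits (B, 1) + dnn_logits (B, 1) -> sigmoid -> predicted CTR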

# Define the feature columns
linear_feature_columns = [SparseFeature(f, vocabulary_size=df[f].nunique(), embedding_size=4) for f in sparse_features] + \
    [DenseFeature(f, 1) for f in dense_features]

dnn_feature_columns = [SparseFeature(f, vocabulary_size=df[f].nunique(), embedding_size=4) for f in sparse_features] + \
    [DenseFeature(f, 1) for f in dense_features]

model = NFM(linear_feature_columns, dnn_feature_columns)
model.summary()

##### Model training
model.compile(optimizer="adam",
              loss="binary_crossentropy",
              metrics=["binary_crossentropy", tf.keras.metrics.AUC(name='auc')])

train_input = {col: df[col] for col in dense_features + sparse_features}
model.fit(train_input, df['label'].values,
          batch_size=64, epochs=5, validation_split=0.2)
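
After training, inference uses the same dict-of-columns input format. A minimal sketch (the sample_input name is illustrative, not from the original script):

# Score a few rows with the trained model; outputs are predicted CTRs in [0, 1]
sample_input = {col: df[col].head(10).values for col in dense_features + sparse_features}
print(model.predict(sample_input).ravel())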