Issues to consider:
(1) If a query consists of only a single category keyword, negative sampling by keyword replacement does not apply. What should be done in that case? The idea is to fall back to random sampling, or to the earlier category-based sampling, for such queries, but that skews the easy/hard distribution of the training set. It is like depth estimation, where near-field and far-field data should not be mixed into one training set: a network trained on near-field data cannot be used directly for far-field depth and has to be fine-tuned first. I think the same distinction has to be kept in the recommendation setting. The networks at this stage are too simple, and the data is not rich enough, to train one model that covers both cases well, so the two difficulty levels should be trained separately (see the first sketch after this list).
(2) The validation set is also questionable: the validation results of the earlier mvdssm network were probably unreliable, so a new validation set needs to be collected. The best approach is to sample it following the same process used to generate the training set. Could the split be done by query_id (see the second sketch below)?
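For (1), a minimal Python sketch of the fallback idea, assuming per-keyword and per-category inverted indexes are available (keyword_index, category_index, and item_pool are hypothetical names, not tables from this log): multi-word queries get a hard negative via keyword replacement, while single-category-keyword queries fall back to an easy category-based or random negative. If the easy/hard imbalance matters, the two kinds of negatives can be written to separate training tables and trained separately.

import random

def sample_negative(query_terms, category_id, item_pool, keyword_index, category_index):
    """Return one negative item_id for a (query, positive item) pair. All indexes are hypothetical."""
    if len(query_terms) > 1:
        # hard negative: pick a random replacement keyword (as if swapping one query term)
        # and sample an item that contains it
        replacement = random.choice(list(keyword_index))
        return random.choice(keyword_index[replacement])
    # single category-keyword query: fall back to an easy negative
    candidates = category_index.get(category_id) or item_pool   # same category, else fully random
    return random.choice(candidates)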
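For (2), a minimal sketch of a group-wise split, assuming each row carries a query_id: hashing the query_id keeps every row of a query on the same side, so no query leaks from train into validation. By contrast, the PAI split command further down splits row by row (fraction 0.9) and can put rows of the same query into both tables.

import hashlib

def split_by_query_id(rows, val_fraction=0.1):
    """rows: iterable of tuples whose first field is query_id."""
    train, val = [], []
    for row in rows:
        bucket = int(hashlib.md5(str(row[0]).encode()).hexdigest(), 16) % 100
        (val if bucket < val_fraction * 100 else train).append(row)
    return train, val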
Constructing the data set. Tables for this step: 306 307 308 309 310 311 313 314
create table graph_embedding.hs_tmp_306 as
select graph_embedding:hs_split_1(item_id, pair, "|") as (item_id, word, weight) from
(select bi_udf:bi_split_value(item_id, tag_result, "%") as (item_id, pair) from
(select item_id, search_kg:alinlp_termweight_ecom(title, "%", "{weight}", 1, 0) as tag_result from graph_embedding.hs_tmp_303 where lengthb(title) > 0)a)b where lengthb(b.pair) > 0;
create table graph_embedding.hs_tmp_307 as
select graph_embedding:hs_split_1(index, pair, "|") as (index, word, weight) from
(select bi_udf:bi_split_value(index, tag_result, "%") as (index, pair) from
(select index, search_kg:alinlp_termweight_ecom(query, "%", "{weight}", 1, 0) as tag_result from graph_embedding.hs_tmp_304 where lengthb(query) > 0)a)b where lengthb(b.pair) > 0;
create table graph_embedding.hs_dssm_dic_query_8 as
select graph_embedding:hs_split_1(query_id, pair, "|") as (query_id, word, weight) from
(select bi_udf:bi_split_value(query_id, tag_result, "%") as (query_id, pair) from
(select query_id, search_kg:alinlp_termweight_ecom(query, "%", "{weight}", 1, 0) as tag_result from graph_embedding.hs_dssm_dic_query_7 where lengthb(query) > 0)a)b where lengthb(b.pair) > 0;
create table hs_tmp_308 as select item_id as id, word, search_kg:alinlp_word_embedding(hs_return_clean(word), "100", "CONTENT_SEARCH") as emb from hs_tmp_306;
create table hs_tmp_309 as select index as id, word, search_kg:alinlp_word_embedding(hs_return_clean(word), "100", "CONTENT_SEARCH") as emb from hs_tmp_307;
create table hs_dssm_dic_query_9 as select query_id as id, word, search_kg:alinlp_word_embedding(hs_return_clean(word), "100", "CONTENT_SEARCH") as emb from hs_dssm_dic_query_8;
create table hs_tmp_310 as
select b.id, a.word, b.emb, a.weight, graph_embedding:change_weight_query_key_1(a.word, a.weight) as new_weight from hs_tmp_306 a join hs_tmp_308 b on a.item_id == b.id and a.word == b.word;
create table hs_tmp_311 as
select b.id, a.word, b.emb, a.weight, graph_embedding:change_weight_query_key_1(a.word, a.weight) as new_weight from hs_tmp_307 a join hs_tmp_309 b on a.index == b.id and a.word == b.word;
create table hs_dssm_dic_query_10 as
select b.id, a.word, b.emb, a.weight, graph_embedding:change_weight_query_key_1(a.word, a.weight) as new_weight from hs_dssm_dic_query_8 a join hs_dssm_dic_query_9 b on a.query_id == b.id and a.word == b.word;
create table hs_tmp_312 as
select id, return_concat_1(new_weight, emb) as title_emb from hs_tmp_310 group by id;
create table hs_tmp_313 as
select id, return_concat_1(new_weight, emb) as query_emb from hs_tmp_311 group by id;
create table hs_dssm_dic_query_11 as
select id, return_concat_1(new_weight, emb) as query_emb from hs_dssm_dic_query_10 group by id;
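For reference, a minimal Python sketch of what the aggregation above is assumed to produce per id: the 100-d word vectors combined with the adjusted weights. The exact behavior of the return_concat_1 and change_weight_query_key_1 UDFs is not shown in this log, so the weighted average below (and the space-separated embedding string) is an assumption for illustration only.

import numpy as np

def fold_words(rows):
    """rows: list of (word, new_weight, emb) for one id; emb is assumed to be a
    space-separated 100-d vector string from alinlp_word_embedding."""
    vecs = np.array([np.array(emb.split(), dtype=np.float32) for _, _, emb in rows])
    weights = np.array([float(w) for _, w, _ in rows], dtype=np.float32)
    weights /= weights.sum() + 1e-8                # normalize the adjusted weights
    return (vecs * weights[:, None]).sum(axis=0)   # one 100-d vector per id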
train_set schema: | se_keyword_mainse_ws | title_mainse_ws | label |
-> extended with the picture embedding column: | se_keyword_mainse_ws | title_mainse_ws | pic_mainse_ws | label |
drop table hs_tmp_314;
yes
create table hs_tmp_314 as select e.query_emb as se_keyword_mainse_ws, e.title_emb as title_mainse_ws, f.pic_emb as pic_mainse_ws, e.label from (select c.*, d.title_emb from (select a.*, b.query_emb from hs_tmp_300 a join hs_tmp_313 b on a.index == b.id)c join hs_tmp_312 d on c.item_id == d.id)e join hs_tmp_348 f on e.item_id == f.item_id;
drop table hs_train_data_dssm_v2_8;
yes
drop table hs_test_data_dssm_v2_8;
yes
PAI -name split -project algo_public
-DinputTableName=graph_embedding.hs_tmp_314
-Doutput1TableName=graph_embedding.hs_train_data_dssm_v2_8
-Doutput2TableName=graph_embedding.hs_test_data_dssm_v2_8
-Dfraction=0.9
-DmemSizePerCore=4096
-DcoreNum=100
;
Start training:
pai -name tensorflow140 -Dscript="file:///home/hengsong/origin_deep_cluster_odps_8.tar.gz" -DentryFile="train_inference_v8.py" -Dcluster='{"worker":{"count":50, "cpu":200, "memory":4000}, "ps":{"count":10, "cpu":200, "memory":5000}}' -DuseSparseClusterSchema=True -DenableDynamicCluster=True -Dtables="odps://graph_embedding/tables/hs_train_data_dssm_v2_7,odps://graph_embedding/tables/hs_test_data_dssm_v2_7,odps://graph_embedding/tables/hs_tmp_267" -Doutputs="odps://graph_embedding/tables/hs_dssm_result_4" -DcheckpointDir="oss://bucket-automl/hengsong/?role_arn=acs:ram::1293303983251548:role/graph2018&host=cn-hangzhou.oss-internal.aliyun-inc.com" -DuserDefinedParameters="--learning_rate=3e-4 --batch_size=1024 --is_save_model=True --attention_type=1 --num_epochs=1 --ckpt=hs_ugc_video_4e_8.ckpt-1" -DuseSparseClusterSchema=True;
num_epoch : 10
pai -name tensorflow140 -Dscript="file:///home/hengsong/origin_deep_cluster_odps_8.tar.gz" -DentryFile="train_inference_v9.py" -Dcluster='{"worker":{"count":1, "cpu":200, "memory":4000}, "ps":{"count":1, "cpu":200, "memory":5000}}' -DuseSparseClusterSchema=True -DenableDynamicCluster=True -Dtables="odps://graph_embedding/tables/hs_train_data_dssm_v2_7,odps://graph_embedding/tables/hs_test_data_dssm_v2_7,odps://graph_embedding/tables/hs_tmp_267" -Doutputs="odps://graph_embedding/tables/hs_dssm_result_5" -DcheckpointDir="oss://bucket-automl/hengsong/?role_arn=acs:ram::1293303983251548:role/graph2018&host=cn-hangzhou.oss-internal.aliyun-inc.com" -DuserDefinedParameters="--learning_rate=3e-4 --batch_size=1024 --is_save_model=True --attention_type=1 --num_epochs=1 --ckpt=hs_ugc_video_4e_10.ckpt" -DuseSparseClusterSchema=True;