Inception-v4 network (a 1-D TensorFlow implementation for two-class heartbeat classification)

# -*- coding: utf-8 -*-
# References:
# https://www.w3cschool.cn/tensorflow_python/tensorflow_python-13p92sws.html      # tf.layers
# https://my.oschina.net/u/876354/blog/1637819      # GoogLeNet (v1 through v4)
# https://www.zhihu.com/question/49039504           # a GoogLeNet implementation
# https://blog.csdn.net/m0_37987687/article/details/80241893
# https://www.jianshu.com/p/cb8ebcee1b15            # saving BN-layer parameters
# https://blog.csdn.net/touch_dream/article/details/79305617      # Inception-v4

import os
import tensorflow as tf
import 数据处理_hss as data_hss    # local data-preprocessing module
import shutil
import numpy as np
import time



def conv1d_relu(X, filters, kernel_size, strides, is_training):
    # conv1d ('same' padding) -> batch norm -> ReLU
    # (the conv bias is redundant before BN, since BN's beta absorbs it)
    X_change = tf.layers.conv1d(X, filters, kernel_size, strides, 'same', use_bias=True, activation=None)
    X_change = tf.layers.batch_normalization(X_change, training=is_training)
    X_change = tf.nn.relu(X_change)

    return X_change

def conv1d_relu_valid(X, filters, kernel_size, strides, is_training):
    # same as conv1d_relu, but with 'valid' padding (no zero padding)
    X_change = tf.layers.conv1d(X, filters, kernel_size, strides, 'valid', use_bias=True, activation=None)
    X_change = tf.layers.batch_normalization(X_change, training=is_training)
    X_change = tf.nn.relu(X_change)

    return X_change
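
# A quick shape check for the two helpers (a minimal sketch; the 900-step input and
# the filter sizes below are illustrative, not taken from the network). With 'same'
# padding the output length is ceil(L / stride); with 'valid' it is
# ceil((L - kernel_size + 1) / stride):
#
#   x = tf.placeholder(tf.float32, [None, 900, 1])
#   flag = tf.placeholder(tf.bool)
#   print(conv1d_relu(x, 32, 3, 2, flag).shape)        # (?, 450, 32)
#   print(conv1d_relu_valid(x, 32, 3, 2, flag).shape)  # (?, 449, 32)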

def inception_A(layers_name, is_training):
    # Inception-A: four parallel branches (avg-pool->1x1, 1x1, 1x1->3x3, 1x1->3x3->3x3)
    # concatenated on the channel axis: 4 * 96 = 384 output channels
    pool_avg = tf.layers.average_pooling1d(layers_name, 3, 1, padding='same')
    conv1_1x1 = conv1d_relu(pool_avg, 96, 1, 1 ,is_training)
    conv2_1x1 = conv1d_relu(layers_name, 96, 1, 1 ,is_training)
    conv3_1x1 = conv1d_relu(layers_name, 64, 1, 1 ,is_training)
    conv4_3x3 = conv1d_relu(conv3_1x1, 96, 3, 1 ,is_training)
    conv5_1x1 = conv1d_relu(layers_name, 64, 1, 1 ,is_training)
    conv6_3x3 = conv1d_relu(conv5_1x1, 96, 3, 1 ,is_training)
    conv7_3x3 = conv1d_relu(conv6_3x3, 96, 3, 1 ,is_training)
    Fc = tf.concat([conv1_1x1,conv2_1x1,conv4_3x3,conv7_3x3],axis=2)

    return Fc

def Reduction_A(layers_name, is_training):
    # Reduction-A: stride-2 'valid' branches halve the sequence length (110 -> 54 here)
    pool_max = tf.layers.max_pooling1d(layers_name, 3, 2, padding='valid')
    conv1_3x3 = conv1d_relu_valid(layers_name, 384, 3, 2 ,is_training)
    conv2_1x1 = conv1d_relu(layers_name, 192, 1, 1 ,is_training)
    conv3_3x3 = conv1d_relu(conv2_1x1, 224, 3, 1 ,is_training)
    conv4_3x3 = conv1d_relu_valid(conv3_3x3, 256, 3, 2 ,is_training)
    Fc = tf.concat([pool_max,conv1_3x3,conv4_3x3],axis=2)

    return Fc
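
# Channel bookkeeping for Reduction_A, assuming the 384-channel input it receives from
# the Inception-A stack: the max-pool branch keeps the input's 384 channels, the
# stride-2 3x3 branch adds 384, and the 1x1->3x3->3x3 branch adds 256, so the concat
# yields 384 + 384 + 256 = 1024 channels -- matching the printed (?, 54, 1024) below.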

def inception_B(layers_name, is_training):
    # Inception-B: four branches; concat channels 128 + 384 + 256 + 256 = 1024
    pool_avg = tf.layers.average_pooling1d(layers_name, 3, 1, padding='same')
    conv1_1x1 = conv1d_relu(pool_avg, 128, 1, 1 ,is_training)

    conv2_1x1 = conv1d_relu(layers_name, 384, 1, 1 ,is_training)

    conv3_1x1 = conv1d_relu(layers_name, 192, 1, 1 ,is_training)
    conv4_7x7 = conv1d_relu(conv3_1x1, 224, 7, 1 ,is_training)
    conv5_7x7 = conv1d_relu(conv4_7x7, 256, 7, 1 ,is_training)

    conv6_1x1 = conv1d_relu(layers_name, 192, 1, 1 ,is_training)
    conv7_7x7 = conv1d_relu(conv6_1x1, 192, 7, 1 ,is_training)
    conv8_7x7 = conv1d_relu(conv7_7x7, 224, 7, 1 ,is_training)
    conv9_7x7 = conv1d_relu(conv8_7x7, 224, 7, 1 ,is_training)
    conv10_7x7 = conv1d_relu(conv9_7x7, 256, 7, 1 ,is_training)

    Fc = tf.concat([conv1_1x1,conv2_1x1,conv5_7x7,conv10_7x7],axis=2)

    return Fc

def Reduction_B(layers_name, is_training):
    # Reduction-B: halves the sequence length again (54 -> 26 here);
    # concat channels 1024 (pool) + 192 + 320 = 1536
    pool_max = tf.layers.max_pooling1d(layers_name, 3, 2, padding='valid')

    conv1_1x1 = conv1d_relu(layers_name, 192, 1, 1 ,is_training)
    conv2_3x3 = conv1d_relu_valid(conv1_1x1, 192, 3, 2 ,is_training)

    conv3_1x1 = conv1d_relu(layers_name, 256, 1, 1 ,is_training)
    conv4_7x7 = conv1d_relu(conv3_1x1, 256, 7, 1 ,is_training)
    conv5_7x7 = conv1d_relu(conv4_7x7, 320, 7, 1 ,is_training)
    conv6_3x3 = conv1d_relu_valid(conv5_7x7, 320, 3, 2 ,is_training)

    Fc = tf.concat([pool_max,conv2_3x3,conv6_3x3],axis=2)

    return Fc

def inception_C(layers_name, is_training):
    # Inception-C: six concatenated branch outputs of 256 channels each -> 1536
    pool_avg = tf.layers.average_pooling1d(layers_name, 3, 1, padding='same')
    conv1_1x1 = conv1d_relu(pool_avg, 256, 1, 1 ,is_training)

    conv2_1x1 = conv1d_relu(layers_name, 256, 1, 1 ,is_training)

    conv3_1x1 = conv1d_relu(layers_name, 384, 1, 1 ,is_training)
    conv4_3x3 = conv1d_relu(conv3_1x1, 256, 3, 1 ,is_training)
    conv5_3x3 = conv1d_relu(conv3_1x1, 256, 3, 1 ,is_training)

    conv6_1x1 = conv1d_relu(layers_name, 384, 1, 1 ,is_training)
    conv7_3x3 = conv1d_relu(conv6_1x1, 448, 3, 1 ,is_training)
    conv8_3x3 = conv1d_relu(conv7_3x3, 512, 3, 1 ,is_training)
    conv9_3x3 = conv1d_relu(conv8_3x3, 256, 3, 1 ,is_training)
    conv10_3x3 = conv1d_relu(conv8_3x3, 256, 3, 1 ,is_training)

    Fc = tf.concat([conv1_1x1,conv2_1x1,conv4_3x3,conv5_3x3,conv9_3x3,conv10_3x3],axis=2)

    return Fc


def model_inception_v4():

    input__data = tf.placeholder(tf.float32, [None, 75*12])
    print("input__data",input__data)    #shape=(?, 900)
    input__label = tf.placeholder(tf.float32, [None, 2])
    inputdata = tf.reshape(input__data,[-1,75*12,1])
    is_training = tf.placeholder(tf.bool)
    print("is_training",is_training)
    #### stem
    conv1_3x3 = conv1d_relu_valid(inputdata, 32, 3, 2 ,is_training)
    conv2_3x3 = conv1d_relu_valid(conv1_3x3, 32, 3, 1 ,is_training)
    conv3_3x3 = conv1d_relu(conv2_3x3, 64, 3, 1 ,is_training)
    pool1_3x3 = tf.layers.max_pooling1d(conv3_3x3, 3, 2, padding='valid')
    conv4_3x3 = conv1d_relu_valid(conv3_3x3, 96, 3, 2 ,is_training)
    Fc_1 = tf.concat([pool1_3x3,conv4_3x3],axis=2)
    conv5_1x1 = conv1d_relu(Fc_1, 64, 1, 1 ,is_training)
    conv6_3x3 = conv1d_relu_valid(conv5_1x1, 96, 3, 1 ,is_training)
    conv7_1x1 = conv1d_relu(Fc_1, 64, 1, 1 ,is_training)
    conv8_7x7 = conv1d_relu(conv7_1x1, 64, 7, 1 ,is_training)
    conv9_7x7 = conv1d_relu(conv8_7x7, 64, 7, 1 ,is_training)
    conv10_3x3 = conv1d_relu_valid(conv9_7x7, 96, 3, 1 ,is_training)
    Fc_2 = tf.concat([conv6_3x3,conv10_3x3],axis=2)
    conv11_3x3 = conv1d_relu_valid(Fc_2, 192, 3, 2 ,is_training)
    pool2_3x3 = tf.layers.max_pooling1d(Fc_2, 3, 2, padding='valid')
    Fc_3 = tf.concat([conv11_3x3,pool2_3x3],axis=2)
    print("Fc_3 = ",Fc_3 )    # shape=(?, 110, 384)
    ##### Inception_A * 4
    Fc_4 = inception_A(Fc_3,is_training)
    Fc_5 = inception_A(Fc_4,is_training)
    Fc_6 = inception_A(Fc_5,is_training)
    Fc_7 = inception_A(Fc_6,is_training)
    print("Fc_7 = ",Fc_7 )    #shape=(?, 110, 384)
    ##### Reduction_A
    Fc_8 = Reduction_A(Fc_7,is_training)     # shape=(?, 54, 1024)
    print("Fc_8 = ",Fc_8)
    ##### Inception_B * 7
    Fc_9 = inception_B(Fc_8,is_training)
    Fc_10 = inception_B(Fc_9,is_training)
    Fc_11 = inception_B(Fc_10,is_training)
    Fc_12 = inception_B(Fc_11,is_training)
    Fc_13 = inception_B(Fc_12,is_training)
    Fc_14 = inception_B(Fc_13,is_training)
    Fc_15 = inception_B(Fc_14,is_training)
    print("Fc_15 = ",Fc_15 )    # shape=(?, 54, 1024)
    ##### Reduction_B
    Fc_16 = Reduction_B(Fc_15,is_training)
    print("Fc_16 = ",Fc_16 )     # shape=(?, 26, 1536)
    ##### Inception_C * 3
    Fc_17 = inception_C(Fc_16,is_training)
    Fc_18 = inception_C(Fc_17,is_training)
    Fc_19 = inception_C(Fc_18,is_training)
    print("Fc_19 = ",Fc_19 )     # shape=(?, 26, 1536)

    pool_avg = tf.layers.average_pooling1d(Fc_19, 26, 1, padding='valid')   # global average pooling (the sequence length is 26 here)
    keepprob = tf.placeholder(tf.float32)
    pool_avg_flat = tf.reshape(pool_avg, [-1, 1536])
    pool_avg_flat = tf.layers.dropout(pool_avg_flat, rate=keepprob, training=is_training)   # keepprob is the DROP rate; kept fraction = 1 - keepprob
    print("pool_avg_flat = ",pool_avg_flat)
    # fully connected classification layer (logits; softmax is applied separately below)
    yconv = tf.layers.dense(pool_avg_flat, 2)
    out = tf.nn.softmax(yconv, name="out")   # named output node, needed when exporting a .pb model

    return input__data,input__label,keepprob,yconv,is_training,out
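
# Exporting the trained graph as a frozen .pb is what the named "out" node above is
# for. A minimal sketch (assumes a live, trained `sess`; the model/model.pb path is
# illustrative, not from this script):
#
#   frozen = tf.graph_util.convert_variables_to_constants(
#       sess, sess.graph_def, output_node_names=["out"])
#   with tf.gfile.GFile("model/model.pb", "wb") as pb_file:
#       pb_file.write(frozen.SerializeToString())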

def optimization(yconv,input__label):

    # cross-entropy loss
    crossentropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=yconv, labels=input__label))
    # Create the optimizer; the control dependency makes every training step also run
    # the BN moving mean/variance updates collected in UPDATE_OPS
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        trainstep = tf.train.AdamOptimizer(1e-4).minimize(crossentropy)
    # accuracy op
    correct_prediction = tf.equal(tf.argmax(yconv, 1), tf.argmax(input__label, 1))
    accuracyrate = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    return crossentropy,trainstep,accuracyrate
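
# The control dependency above is what ties the BN moving-average updates to the train
# step; without it, moving_mean/moving_variance would never change. A quick way to
# verify after the graph is built (a sketch):
#
#   for op in tf.get_collection(tf.GraphKeys.UPDATE_OPS):
#       print(op.name)   # one moving_mean/moving_variance update per BN layer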


if __name__ == '__main__':


    start = time.time()
    file_1 = r'D:\hss_python_work\resnet_cnn\数据集\N'    # data folders
    file_2 = r'D:\hss_python_work\resnet_cnn\数据集\S'
    file_3 = r'D:\hss_python_work\resnet_cnn\数据集\T'
    file_4 = r'D:\hss_python_work\resnet_cnn\数据集\V'
    file_5 = r'D:\hss_python_work\resnet_cnn\数据集\X'
    file_NO_X = r'D:\hss_python_work\resnet_cnn\数据集\非X'

    data_all_1,label_all_1 = data_hss.data_analysis(file_5, label_def = [0,1], proportion = 800)      # label the X class
    data_all_2,label_all_2 = data_hss.data_analysis(file_NO_X, label_def = [1,0], proportion = 800)   # label the non-X class

    data_all = data_all_1 + data_all_2
    label_all = label_all_1 + label_all_2
    print("data_all = ",len(data_all))
    print("label_all = ",len(label_all))

    data_train,label_train,data_test,label_test = data_hss.Dataset_partition(data_all,label_all)
    print(data_train.shape,label_train.shape,data_test.shape,label_test.shape)



    sess = tf.InteractiveSession()
    input_data,input_label,keep_prob,y_conv,is_training,out = model_inception_v4()
    cross_entropy,train_step,accuracy_rate = optimization(y_conv,input_label)


    ######## Training ########
    # initialize all variables
    tf.global_variables_initializer().run()
    # the Saver must also keep the BN moving_mean/moving_variance, which are not
    # trainable and would be missed by tf.trainable_variables() alone
    var_list = [var for var in tf.global_variables() if "moving" in var.name]
    var_list += tf.trainable_variables()
    m_saver = tf.train.Saver(var_list=var_list, max_to_keep=5)



    ########### Training loop (dropout drop-rate 0.2, mini-batch of 200, up to 10 iterations)
    model_doc = r'model'   # folder for saved models
    if not os.path.exists(model_doc):
        os.makedirs(model_doc)
    else:
        shutil.rmtree(model_doc)   # force-remove the old folder first
        os.makedirs(model_doc)

    train_accuracy_all = []   # history of training accuracies
    max_acc = 0
    f = open('model/acc.txt','w')

    for i in range(10):

        batch_data_train,batch_label_train = data_hss.batch(data_train,label_train,batch_size = 200)
        # print("batch_data_train = ",batch_data_train,batch_data_train.shape)
        # print("batch_label_train = ",batch_label_train,batch_label_train.shape)

        # evaluate and log every iteration (i % 1 == 0)
        if i%1 == 0:

            train_accuracy = accuracy_rate.eval(feed_dict={input_data:batch_data_train,input_label: batch_label_train,
                                                           keep_prob: 1,is_training : False})
            print ("-->step %d, training accuracy %.4f ,max_acc %.4f"%(i, train_accuracy,max_acc))
            print("cross_entropy = ",sess.run(cross_entropy,feed_dict={input_data:batch_data_train,input_label: batch_label_train,
                                                                       keep_prob: 1,is_training : False}))
            f.write(str(i)+', train_accuracy: '+str(train_accuracy)+'  '+str(max_acc) +'\n')
            # # keep the 5 most recent checkpoints
            # m_saver.save(sess, './model/model.ckpt', global_step=i)
            # keep checkpoints whose accuracy is within 0.04 of the best so far
            if train_accuracy >= max_acc :
                max_acc = train_accuracy
            if train_accuracy >= max_acc - 0.04:
                m_saver.save(sess, './model/model.ckpt', global_step=i)

            train_accuracy_all.append(train_accuracy)

        if max_acc >= 0.95 and train_accuracy < 0.5:   # stop once accuracy has peaked and then collapsed
            print("break reason 1")
            break
        if (len(train_accuracy_all) >= 5      # stop once at least 5 accuracies are recorded
            and all(acc > 0.99 for acc in train_accuracy_all[-5:])):   # and the last 5 all exceed 0.99
            # print(train_accuracy_all)
            print("break reason 2")
            break

        # run one training step
        train_step_,loss = sess.run([train_step,cross_entropy], feed_dict={input_data:batch_data_train,
                                                                                   input_label: batch_label_train,
                                                                                   keep_prob: 0.2,
                                                                                   is_training : True})


        # second early-stop check, with a looser threshold, after the weight update
        # print(train_accuracy_all)
        if max_acc >= 0.9 and train_accuracy < 0.5:   # stop once accuracy has peaked and then collapsed
            break
    f.close()


    ####### Restore the model and predict

    model_file=tf.train.latest_checkpoint('model/')
    m_saver.restore(sess,model_file)


    a = 0       # number of correct predictions
    TP = 0      # true positives: symptom beats predicted as symptom
    FN_TP = 0   # actual symptom beats in the labels (TP + FN)
    TN = 0      # true negatives: normal beats predicted as normal
    TN_FP = 0   # actual normal beats in the labels (TN + FP)

    #### Batch heartbeat prediction
    output = sess.run(out, feed_dict={input_data: data_test, keep_prob: 1.0, is_training: False})   # `out` is the in-graph softmax
    print("output = ", output)
    output = np.round(output)   # round the probabilities to 0/1
    print("output = ", output)
    print("label_test = ", label_test)

    for i in range(0,len(data_test)):
        if label_test[i][0] == output[i][0] and label_test[i][1] == output[i][1]:
            a += 1
        if label_test[i][0] == output[i][0] and output[i][0] == 1:   # true positive
            TP += 1
        if label_test[i][0] == 1:
            FN_TP += 1
        if label_test[i][1] == output[i][1] and output[i][1] == 1:   # true negative
            TN += 1
        if label_test[i][1] == 1:
            TN_FP += 1

    # ### Single-beat prediction
    # single_data = np.empty([1, 75*12])
    # for i in range(0, len(data_test)):
    #
    #     single_data[0] = data_test[i]
    #     # print("single_data = ", single_data)
    #
    #     output = sess.run(out, feed_dict={input_data: single_data, keep_prob: 1.0, is_training: False})
    #     output = np.round(output)   # round the probabilities to 0/1
    #     print(i, "/", len(data_test)-1, "  output = ", output, "single_label = ", label_test[i])
    #     if label_test[i][0] == output[0][0] and label_test[i][1] == output[0][1]:
    #         a += 1
    #     if label_test[i][0] == output[0][0] and output[0][0] == 1:   # true positive
    #         TP += 1
    #     if label_test[i][0] == 1:
    #         FN_TP += 1
    #     if label_test[i][1] == output[0][1] and output[0][1] == 1:   # true negative
    #         TN += 1
    #     if label_test[i][1] == 1:
    #         TN_FP += 1



    print("len(data_test) = ",len(data_test),"a =",a)
    print("sensibility = ",TP/FN_TP,"specificity =",TN/TN_FP)


    end = time.time()
    print("程序运行时间:",end - start)
